Crawling URLs from multiple sitemap.xml files

I am building an Apify actor for a site where all the URLs I need are stored in several different sitemap.xml files. The file names are static, but I can't figure out how to feed multiple sitemap.xml files to the actor.

Below is working code for a single XML file. I somehow need to for-each over multiple sitemap URLs, but since there are about 600 of them, it would be ideal to pull the list of sitemaps from something like a CSV, then scrape each sitemap for its URLs, and finally crawl each of those URLs.

const Apify = require('apify');
const cheerio = require('cheerio');
const requestPromised = require('request-promise-native');

Apify.main(async () => {

    const xml = await requestPromised({
        url: 'https://www.website.com/sitemap1.xml', // <- This part needs to accept input of about 600 sitemap.xml urls in total

        headers: {
        'User-Agent': 'curl/7.54.0'
        }
     });

    // Parse sitemap and create RequestList from it
    const $ = cheerio.load(xml);
    const sources = [];
    $('loc').each(function (val) {
        const url = $(this).text().trim();
        sources.push({
            url,
            headers: {
                // NOTE: Otherwise the target doesn't allow to download the page!
                'User-Agent': 'curl/7.54.0',
            }
        });
    });

    const requestList = new Apify.RequestList({
        sources,
    });
    await requestList.initialize();

    // Crawl each page from sitemap
    const crawler = new Apify.CheerioCrawler({
        requestList,
        handlePageFunction: async ({ $, request }) => {

            await Apify.pushData({
                url: request.url
            });
        },
    });

    await crawler.run();
    console.log('Done.');
});

Each sitemap.xml has a static link/name, but their contents change every day. The total number of URLs across the sitemaps is 60,000-70,000, and those are the URLs I ultimately need to fetch :-)

The most reliable way is to leverage the power of the Apify Crawler classes. There are, of course, many ways to solve this.

The simplest solution would be to use a single CheerioCrawler and have separate logic in the handlePageFunction for the sitemap URLs and the final URLs. Unfortunately, CheerioCrawler cannot parse XML (this may be fixed in the near future), so we will have to use two crawlers.

For the first part, parsing the XML, we will use a BasicCrawler. It is Apify's most generic crawler, so it can easily reuse the code you already have. We will push the extracted URLs to a requestQueue and process them in the second crawler, which can stay pretty much as it is.

const Apify = require('apify');
const cheerio = require('cheerio');
const requestPromised = require('request-promise-native');

Apify.main(async () => {

    // Here we will push the URLs found in the sitemaps
    const requestQueue = await Apify.openRequestQueue();

    // This would be better passed via INPUT as `const xmlUrls = await Apify.getInput().then((input => input.xmlUrls))`
    const xmlUrls = [
        'https://www.website.com/sitemap1.xml',
        // ...
    ]

    const xmlRequestList = new Apify.RequestList({
        sources: xmlUrls.map((url) => ({ url })) // We make a simple request object from each URL
    })

    await xmlRequestList.initialize();

    const xmlCrawler = new Apify.BasicCrawler({
        requestList: xmlRequestList,
        handleRequestFunction: async ({ request }) => {
            // This is basically the same code you have, we just have to push the sources to the queue
            const xml = await requestPromised({
                url: request.url,
                headers: {
                    'User-Agent': 'curl/7.54.0'
                }
            });

            const $ = cheerio.load(xml);
            const sources = [];
            $('loc').each(function (val) {
                const url = $(this).text().trim();
                sources.push({
                    url,
                    headers: {
                        // NOTE: Otherwise the target doesn't allow to download the page!
                        'User-Agent': 'curl/7.54.0',
                    }
                });
            });
            for (const finalRequest of sources) {
                await requestQueue.addRequest(finalRequest);
            }
        }
    })

    await xmlCrawler.run()

    // Crawl each page from sitemap
    const crawler = new Apify.CheerioCrawler({
        requestQueue,
        handlePageFunction: async ({ $, request }) => {
            // Add your logic for final URLs
            await Apify.pushData({
                url: request.url
            });
        },
    });

    await crawler.run();
    console.log('Done.');
});
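
Note that the requestQueue deduplicates requests by URL, so if the same URL appears in several sitemaps it will only be crawled once.

As the comment in the code above hints, hard-coding ~600 sitemap URLs is impractical, so it is cleaner to pass them in through the actor INPUT. Below is a minimal sketch of how that input could be loaded; the `xmlUrls` and `csvUrl` input fields are just assumed names for this example, and the CSV branch covers the case you mentioned where the sitemap list lives in a CSV (one URL per line).

const Apify = require('apify');
const requestPromised = require('request-promise-native');

Apify.main(async () => {
    // Actor INPUT, e.g. { "xmlUrls": [...] } or { "csvUrl": "https://www.website.com/sitemaps.csv" }
    // (both field names are just examples, not a fixed schema)
    const input = await Apify.getInput();

    let xmlUrls = input.xmlUrls || [];

    // If the ~600 sitemap URLs live in a CSV with one URL per line,
    // download it and split it into an array instead.
    if (input.csvUrl) {
        const csv = await requestPromised({ url: input.csvUrl });
        xmlUrls = csv.split('\n').map((line) => line.trim()).filter(Boolean);
    }

    console.log(`Loaded ${xmlUrls.length} sitemap URLs.`);

    // ...then build xmlRequestList from xmlUrls exactly as in the crawler above.
});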