如何使用 Puppeteer 和 Headless Chrome 递归调用多个 URL?

How do I recursively call multiple URLs using Puppeteer and Headless Chrome?

我正在尝试编写一个程序来同时扫描多个 URL(并行化)。我提取了站点地图,并将其中的链接作为数组存储在变量中,如下所示。但我无法使用 Puppeteer 打开这些 URL,收到了以下错误:

原始消息:'Cannot navigate to invalid URL'

下面是我的代码。有人可以帮帮我吗。

const sitemapper = require('@mastixmc/sitemapper');
const SitemapXMLParser = require('sitemap-xml-parser');
const url = 'https://edition.cnn.com/sitemaps/sitemap-section.xml';

/*If sitemapindex (link of xml or gz file) is written in sitemap, the URL will be accessed.
You can optionally specify the number of concurrent accesses and the number of milliseconds after processing and access to resume processing after a delay.
*/

const options = {
    delay: 3000,
    limit: 50000
};

const sitemapXMLParser = new SitemapXMLParser(url, options);

sitemapXMLParser.fetch().then(result => {
    var locs = result.map(value => value.loc)   
    // BUG: Array.prototype.toString() joins the array into one big
    // comma-separated STRING, and replace("[", '<br>') does nothing useful
    // (there is no "[" in that string). `urls` therefore holds a string,
    // not an array of URLs.
    var locsFiltered = locs.toString().replace("[",'<br>');
    const urls = locsFiltered
    console.log(locsFiltered)
   

const puppeteer = require("puppeteer");

async function scrapeProduct(url) {
    const urls = locsFiltered // still the broken string; shadows the `url` param
    const browser = await puppeteer.launch({
        headless: false
    });
    // NOTE(review): `i` has no let/var, so it leaks as an implicit global.
    // Because `urls` is a string, urls.length is the CHARACTER count and
    // each urls[i] is a single character — which is why page.goto() below
    // throws 'Cannot navigate to invalid URL'.
    for (i = 0; i < urls.length; i++) {
        const page = await browser.newPage();
        const url = urls[i];
        // NOTE(review): this waitForNavigation promise is never awaited
        // (floating promise) — it has no effect here.
        const promise = page.waitForNavigation({
            waitUntil: "networkidle2"
        });
        await page.goto(`${url}`);
    }};
   
    scrapeProduct();
    
}); 

您之所以看到 "Cannot navigate to invalid URL" 错误,是因为把数组用错误的方法转换成了一个字符串,再逐个字符当作 URL 去访问。把下面这几行代码改成这样会更好:

// var locsFiltered = locs.toString().replace("[",'<br>') // This is wrong
// const urls = locsFiltered                              // So value is invalid
// console.log(locsFiltered)

const urls = locs.map(value => value[0])  // This is better

为了抓取 CNN 网站,我添加了 puppeteer-cluster 以提高速度:

const { Cluster } = require('puppeteer-cluster')
const sitemapper = require('@mastixmc/sitemapper')
const SitemapXMLParser = require('sitemap-xml-parser')
const url = 'https://edition.cnn.com/sitemaps/sitemap-section.xml'


/**
 * Visit every URL extracted from the sitemap using a puppeteer-cluster
 * worker pool and take a screenshot of each page.
 *
 * @param {Array} locs - sitemap entries as produced by sitemap-xml-parser;
 *   each entry's first element ([0]) is the page URL string.
 * @returns {Promise<void>} resolves once all queued pages are processed
 *   and the cluster is closed.
 */
async function scrapeProduct(locs) {
    // Each loc entry is an array whose first element is the URL string.
    const urls = locs.map(value => value[0])
    const cluster = await Cluster.launch({
        concurrency: Cluster.CONCURRENCY_CONTEXT,
        maxConcurrency: 2, // You can set this to any number you like
        puppeteerOptions: {
            headless: false,
            devtools: false,
            args: [],
        }
    })

    // Worker task: navigate and screenshot. timeout: 0 disables puppeteer's
    // default 30 s navigation timeout, since some pages load slowly.
    await cluster.task(async ({ page, data: url }) => {
        await page.goto(url, {timeout: 0, waitUntil: 'networkidle2'})
        const screen = await page.screenshot()
        // Store screenshot, do something else
    })

    // FIX: the original `for (i = 0; ...)` leaked `i` as an implicit global.
    for (const url of urls) {
        console.log(url)
        await cluster.queue(url)
    }

    await cluster.idle()  // wait until every queued job has finished
    await cluster.close()
}

/******
If sitemapindex (link of xml or gz file) is written in sitemap, the URL will be accessed.
You can optionally specify the number of concurrent accesses and the number of milliseconds after processing and access to resume processing after a delay.
*******/
const options = {
    delay: 3000,  // ms to pause between sitemap requests
    limit: 50000  // maximum number of URLs to collect
}
const sitemapXMLParser = new SitemapXMLParser(url, options)
sitemapXMLParser.fetch().then(async result => {
    const locs = result.map(value => value.loc)
    await scrapeProduct(locs)
}).catch(err => {
    // FIX: the original chain had no rejection handler, so a failed sitemap
    // fetch (or a scrape error) became an unhandled promise rejection.
    console.error('Failed to fetch sitemap or scrape pages:', err)
})