How to scrape three page levels with Scrapy?

How do I use meta to include the item in the pagination?


    next_page = response.xpath('//a[@class="pages-next"]/@href').get()
    if next_page is not None:
        yield response.follow(next_page, callback=self.parse_allbranddevicesurl)

I am fairly sure I am not passing the item through correctly. (I only managed to collect all the data points from the first page of LEVEL2.)


Here is the code:

import scrapy


class GsmSpider(scrapy.Spider):
    name = 'gsm'
    allowed_domains = ['gsmarena.com']
    start_urls = ['https://gsmarena.com/makers.php3']
   
    # LEVEL1 | all brands

    def parse(self, response):
        gsms = response.xpath('//div[@class="st-text"]/table//td')
        for gsm in gsms:
            allbranddevicesurl = gsm.xpath('.//a/@href').get()
            brandname = gsm.xpath('.//a/text()').get()
            devicecount = gsm.xpath('.//span/text()').get()
            
            yield response.follow(allbranddevicesurl, callback=self.parse_allbranddevicesurl,
                                    meta= {'brandname': brandname,
                                           'devicecount': devicecount})

    # LEVEL2 | all devices

    def parse_allbranddevicesurl(self, response):
        
        brandname = response.meta['brandname']
        devicecount = response.meta['devicecount']
        
        phones = response.xpath('//*[@id="review-body"]//li')
        for phone in phones:
            detailpageurl = phone.xpath('.//a/@href').get()

            yield response.follow(detailpageurl,
                                    callback=self.parse_detailpage,
                                    meta= {'brandname': brandname,
                                           'devicecount': devicecount,})

        next_page = response.xpath('//a[@class="pages-next"]/@href').get()
        if next_page is not None:
            yield response.follow(next_page, callback=self.parse_allbranddevicesurl)

    # LEVEL3 | detailpage

    def parse_detailpage(self, response):
     
        brandname = response.meta['brandname']
        devicecount = response.meta['devicecount']
         
        details = response.xpath('//div[@class="article-info"]')
        for detail in details:
            phonename = detail.xpath('.//h1/text()').get()
 
            yield {'brandname': brandname,
                   'devicecount': devicecount,
                   'phonename': phonename}

Thanks a lot in advance for pointing out the mistake.

You forgot to add the meta to the next-page request inside LEVEL2. Without it, response.meta['brandname'] raises a KeyError on every paginated page after the first, which is why you only got the data points from the first page.

import scrapy


class GsmSpider(scrapy.Spider):
    name = 'gsm'
    allowed_domains = ['gsmarena.com']
    start_urls = ['https://gsmarena.com/makers.php3']
    
    # LEVEL1 | all brands

    def parse(self, response):
        gsms = response.xpath('//div[@class="st-text"]/table//td')
        for gsm in gsms:
            allbranddevicesurl = gsm.xpath('.//a/@href').get()
            brandname = gsm.xpath('.//a/text()').get()
            devicecount = gsm.xpath('.//span/text()').get()
            
            yield response.follow(allbranddevicesurl, callback=self.parse_allbranddevicesurl,
                                    meta= {'brandname': brandname,
                                            'devicecount': devicecount})

    # LEVEL2 | all devices

    def parse_allbranddevicesurl(self, response):
        
        brandname = response.meta['brandname']
        devicecount = response.meta['devicecount']
        
        phones = response.xpath('//*[@id="review-body"]//li')
        for phone in phones:
            detailpageurl = phone.xpath('.//a/@href').get()

            yield response.follow(detailpageurl,
                                    callback=self.parse_detailpage,
                                    meta= {'brandname': brandname,
                                            'devicecount': devicecount,})

        next_page = response.xpath('//a[@class="pages-next"]/@href').get()
        if next_page is not None:

            # This is the fixed line:

            yield response.follow(next_page, callback=self.parse_allbranddevicesurl,
                                    meta={'brandname': brandname,
                                          'devicecount': devicecount})


    # LEVEL3 | detailpage

    def parse_detailpage(self, response):
        
        brandname = response.meta['brandname']
        devicecount = response.meta['devicecount']
            
        details = response.xpath('//div[@class="article-info"]')
        for detail in details:
            phonename = detail.xpath('.//h1/text()').get()
    
            yield {'brandname': brandname,
                    'devicecount': devicecount,
                    'phonename': phonename}
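
As a side note: if you are on Scrapy 1.7 or newer, cb_kwargs is the recommended alternative to meta for passing data between callbacks; the values arrive as regular keyword arguments, so a forgotten key fails loudly instead of silently breaking the pagination. Below is a minimal sketch of the same three-level spider using cb_kwargs (same XPaths, just a different way of forwarding brandname and devicecount):

import scrapy


class GsmSpider(scrapy.Spider):
    name = 'gsm'
    allowed_domains = ['gsmarena.com']
    start_urls = ['https://gsmarena.com/makers.php3']

    # LEVEL1 | all brands
    def parse(self, response):
        for gsm in response.xpath('//div[@class="st-text"]/table//td'):
            url = gsm.xpath('.//a/@href').get()
            if url is None:  # skip table cells without a link
                continue
            yield response.follow(
                url,
                callback=self.parse_allbranddevicesurl,
                cb_kwargs={'brandname': gsm.xpath('.//a/text()').get(),
                           'devicecount': gsm.xpath('.//span/text()').get()})

    # LEVEL2 | all devices of one brand (paginated)
    def parse_allbranddevicesurl(self, response, brandname, devicecount):
        for phone in response.xpath('//*[@id="review-body"]//li'):
            yield response.follow(
                phone.xpath('.//a/@href').get(),
                callback=self.parse_detailpage,
                cb_kwargs={'brandname': brandname, 'devicecount': devicecount})

        next_page = response.xpath('//a[@class="pages-next"]/@href').get()
        if next_page is not None:
            # exactly like meta, cb_kwargs has to be forwarded on the next-page request as well
            yield response.follow(
                next_page,
                callback=self.parse_allbranddevicesurl,
                cb_kwargs={'brandname': brandname, 'devicecount': devicecount})

    # LEVEL3 | detail page
    def parse_detailpage(self, response, brandname, devicecount):
        yield {'brandname': brandname,
               'devicecount': devicecount,
               'phonename': response.xpath('//div[@class="article-info"]//h1/text()').get()}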