Following urls in javascript - Scrapy Splash

I am very new to web scraping. I have managed to extract information from static websites, but now I am trying to follow urls and extract the data (which of course involves some javascript). I have installed scrapy-splash for this, and it is running fine. The website I want to scrape is https://www.ta.com/portfolio/investments/ari-network-services-inc and the button at the top right takes you to the next page (it is javascript, hence Splash). I want to scrape some basic data (company name, sectors, etc.) on every page, down to the last page. This is what I have done so far, and I need help correcting it so that it executes successfully.
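In case it helps with reproduction: scrapy-splash only takes effect once it is enabled in the project settings. A minimal settings.py sketch, following the scrapy-splash README and assuming Splash is listening on its default local port:

# settings.py -- scrapy-splash wiring as documented in the scrapy-splash README
SPLASH_URL = 'http://localhost:8050'   # assumption: Splash running locally on the default port

DOWNLOADER_MIDDLEWARES = {
    'scrapy_splash.SplashCookiesMiddleware': 723,
    'scrapy_splash.SplashMiddleware': 725,
    'scrapy.downloadermiddlewares.httpcompression.HttpCompressionMiddleware': 810,
}

SPIDER_MIDDLEWARES = {
    'scrapy_splash.SplashDeduplicateArgsMiddleware': 100,
}

DUPEFILTER_CLASS = 'scrapy_splash.SplashAwareDupeFilter'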


import scrapy
from scrapy_splash import SplashRequest
import urllib.parse as urlparse


class TAFolio(scrapy.Spider):
    name = 'Portfolio'
    start_urls = ['https://www.ta.com/portfolio/investments/ari-network-services-inc']

    def start_requests(self):
        for url in self.start_urls:
            yield SplashRequest(url=url, callback=self.parse, args={"wait": 3})

    def parse(self, response):

        companyname = response.css('h1.item_detail-main-info-heading::text').extract_first()
        sectors = response.css('.item_detail-main-info-group-item::text')[0].extract()
        investmentyear = response.css('.item_detail-main-info-group-item::text')[1].extract()
        status = response.css('.item_detail-main-info-group-item::text')[2].extract()
        location = response.css('.item_detail-main-info-group-item::text')[3].extract()
        region = response.css('.item_detail-main-info-group-item::text')[4].extract()
        team = response.css('div.item_detail-main-info-group a::text').extract()

        yield {
            'companyname': companyname,
            'sectors': sectors,
            'investmentyear': investmentyear,
            'status': status,
            'location': location,
            'region': region,
            'team': team
        }

        next_page = response.css('li.item_detail-nav-item--next a::attr(href)').extract()

        if next_page is not None:
            yield SplashRequest(urlparse.urljoin('https://www.ta.com', next_page),
                                callback=self.parse, args={"wait": 3})

This gives me the correct information for the start_url, but it does not move on to the next page.
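For reference: .extract() returns a list, so the "is not None" check above can never fail, and urljoin() ends up being handed a list rather than a string. A minimal, untested correction of the last lines of parse would be to take the first match and test its truthiness:

        next_page = response.css('li.item_detail-nav-item--next a::attr(href)').extract_first()

        # extract_first() returns None when the page has no "next" link
        if next_page:
            yield SplashRequest(urlparse.urljoin('https://www.ta.com', next_page),
                                callback=self.parse, args={"wait": 3})

The updated spider below sidesteps the pagination entirely, so this only matters for the first version.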

Update: the problem was with the order in which I was crawling the website. Below is the updated code, which works well.

import scrapy
from scrapy_splash import SplashRequest
import urllib.parse as urlparse


class TAFolio(scrapy.Spider):
    name = 'Portfolio'
    start_urls = [
        'https://www.ta.com/portfolio/business-services',
        'https://www.ta.com/portfolio/consumer',
        'https://www.ta.com/portfolio/financial-services',
        'https://www.ta.com/portfolio/healthcare',
        'https://www.ta.com/portfolio/technology'
    ]

    def start_requests(self):
        for url in self.start_urls:
            yield SplashRequest(url=url, callback=self.parse, args={"wait": 3})

    def parse(self, response):

        companylink = response.css('div.tiles.js-portfolio-tiles a::attr(href)').extract()
        for i in companylink:
            yield response.follow('https://www.ta.com' + str(i), callback=self.parse1)

    def parse1(self, response):

        companyname = response.css('h1.item_detail-main-info-heading::text').extract_first()
        sectors = response.css('.item_detail-main-info-group-item::text')[0].extract()
        investmentyear = response.css('.item_detail-main-info-group-item::text')[1].extract()
        status = response.css('.item_detail-main-info-group-item::text')[2].extract()
        location = response.css('.item_detail-main-info-group-item::text')[3].extract()
        region = response.css('.item_detail-main-info-group-item::text')[4].extract()
        team = response.css('div.item_detail-main-info-group a::text').extract()
        about_company = response.css('h2.item_detail-main-content-heading::text').extract()
        about_company_detail = response.css('div.markdown p::text').extract()

        yield {
            'companyname': companyname,
            'sectors': sectors,
            'investmentyear': investmentyear,
            'status': status,
            'location': location,
            'region': region,
            'team': team,
            'about_company': about_company,
            'about_company_detail': about_company_detail
        }
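Only the five category listing pages go through Splash here; the individual company pages are followed with plain Scrapy requests, which works because those fields come back without JavaScript rendering. To run the spider and collect every yielded item into a JSON file, one option is a small runner script like the sketch below (an assumption on my part: the script sits inside the Scrapy project, so get_project_settings() picks up the scrapy-splash configuration shown earlier; the file name run.py is arbitrary).

# run.py -- run the "Portfolio" spider outside of "scrapy crawl" and write items to portfolio.json
from scrapy.crawler import CrawlerProcess
from scrapy.utils.project import get_project_settings

settings = get_project_settings()
settings.set("FEEDS", {"portfolio.json": {"format": "json"}})   # feed export (Scrapy 2.1+)

process = CrawlerProcess(settings)
process.crawl("Portfolio")   # spider name defined on TAFolio
process.start()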