如何在抓取网页时单击下一步按钮
How to click next button while scraping a webpage
我正在使用 scrapy 抓取一个包含多页信息的网页,我需要程序单击下一个按钮,然后抓取下一页,然后继续这样做,直到抓取所有页面。但是我不知道该怎么做,我只能抓取第一页。
from scrapy_splash import SplashRequest
from ..items import GameItem
class MySpider(Spider):
    """Spider that renders pages through Splash and scrapes card names/prices.

    Uses SplashRequest with a 3-second wait so JavaScript-populated content
    is present before parsing.
    """

    name = 'splash_spider'  # Name of Spider
    # url(s)
    start_urls = ['http://www.starcitygames.com/catalog/category/10th%20Edition']

    def start_requests(self):
        """Issue one Splash-rendered request per start URL."""
        for url in self.start_urls:
            yield SplashRequest(url=url, callback=self.parse, args={"wait": 3})

    # Scraping
    def parse(self, response):
        """Yield one GameItem per table row of the catalog page."""
        for game in response.css("tr"):
            # BUG FIX: create a fresh item for every row.  The original code
            # built a single GameItem before the loop, so every yielded item
            # was the same mutable object being overwritten each iteration.
            item = GameItem()
            # Card Name
            item["Name"] = game.css("a.card_popup::text").extract_first()
            # Price
            item["Price"] = game.css("td.deckdbbody.search_results_9::text").extract_first()
            yield item
Documentation 对此非常明确:
from scrapy_splash import SplashRequest
from ..items import GameItem
class MySpider(Spider):
    """Answer snippet: the same Splash spider extended with pagination.

    After yielding the items on the current page, it looks up the "next
    page" link and follows it with the same parse callback.
    """

    name = 'splash_spider'  # Name of Spider
    # url(s)
    start_urls = ['http://www.starcitygames.com/catalog/category/10th%20Edition']

    def start_requests(self):
        """Issue one Splash-rendered request per start URL."""
        for url in self.start_urls:
            yield SplashRequest(url=url, callback=self.parse, args={"wait": 3})

    # Scraping
    def parse(self, response):
        """Yield one GameItem per row, then follow the next-page link."""
        for game in response.css("tr"):
            # Fresh item per row; a single shared GameItem would be
            # mutated on every iteration.
            item = GameItem()
            # Card Name
            item["Name"] = game.css("a.card_popup::text").extract_first()
            # Price
            item["Price"] = game.css("td.deckdbbody.search_results_9::text").extract_first()
            yield item

        # TODO: replace the placeholder below with the real CSS selector for
        # the next-page link (the original text used invalid pseudocode
        # `<your css selector to find next page>`, which is not Python).
        next_page = response.css('YOUR-NEXT-PAGE-SELECTOR::attr(href)').get()
        if next_page is not None:
            # response.follow resolves relative URLs and re-enters parse.
            yield response.follow(next_page, self.parse)
您可以使用 css 选择器定位下一个按钮
a:nth-of-type(7)
和 nth-child 选择器
a:nth-child(8)
您可以像下面这样工作:
import scrapy
class GameSpider(scrapy.Spider):
    """Scrape card name/price rows and paginate via the next-page link."""

    name = 'game_spider'
    start_urls = ['http://www.starcitygames.com/catalog/category/10th%20Edition']

    def parse(self, response):
        """Yield a dict per catalog row, then follow pagination if present."""
        rows = response.css("tr.deckdbbody_row")
        for row in rows:
            card_name = row.css("a.card_popup::text").get()
            card_price = row.css(".search_results_9::text").get()
            yield {'name': card_name, 'price': card_price}

        # The 8th anchor in the div after the results table is "next page".
        next_href = response.css('table+ div a:nth-child(8)::attr(href)').get()
        if next_href is not None:
            yield response.follow(next_href, callback=self.parse)
我正在使用 scrapy 抓取一个包含多页信息的网页,我需要程序单击下一个按钮,然后抓取下一页,然后继续这样做,直到抓取所有页面。但是我不知道该怎么做,我只能抓取第一页。
from scrapy_splash import SplashRequest
from ..items import GameItem
class MySpider(Spider):
    """Spider that renders pages through Splash and scrapes card names/prices.

    Uses SplashRequest with a 3-second wait so JavaScript-populated content
    is present before parsing.
    """

    name = 'splash_spider'  # Name of Spider
    # url(s)
    start_urls = ['http://www.starcitygames.com/catalog/category/10th%20Edition']

    def start_requests(self):
        """Issue one Splash-rendered request per start URL."""
        for url in self.start_urls:
            yield SplashRequest(url=url, callback=self.parse, args={"wait": 3})

    # Scraping
    def parse(self, response):
        """Yield one GameItem per table row of the catalog page."""
        for game in response.css("tr"):
            # BUG FIX: create a fresh item for every row.  The original code
            # built a single GameItem before the loop, so every yielded item
            # was the same mutable object being overwritten each iteration.
            item = GameItem()
            # Card Name
            item["Name"] = game.css("a.card_popup::text").extract_first()
            # Price
            item["Price"] = game.css("td.deckdbbody.search_results_9::text").extract_first()
            yield item
Documentation 对此非常明确:
from scrapy_splash import SplashRequest
from ..items import GameItem
class MySpider(Spider):
    """Answer snippet: the same Splash spider extended with pagination.

    After yielding the items on the current page, it looks up the "next
    page" link and follows it with the same parse callback.
    """

    name = 'splash_spider'  # Name of Spider
    # url(s)
    start_urls = ['http://www.starcitygames.com/catalog/category/10th%20Edition']

    def start_requests(self):
        """Issue one Splash-rendered request per start URL."""
        for url in self.start_urls:
            yield SplashRequest(url=url, callback=self.parse, args={"wait": 3})

    # Scraping
    def parse(self, response):
        """Yield one GameItem per row, then follow the next-page link."""
        for game in response.css("tr"):
            # Fresh item per row; a single shared GameItem would be
            # mutated on every iteration.
            item = GameItem()
            # Card Name
            item["Name"] = game.css("a.card_popup::text").extract_first()
            # Price
            item["Price"] = game.css("td.deckdbbody.search_results_9::text").extract_first()
            yield item

        # TODO: replace the placeholder below with the real CSS selector for
        # the next-page link (the original text used invalid pseudocode
        # `<your css selector to find next page>`, which is not Python).
        next_page = response.css('YOUR-NEXT-PAGE-SELECTOR::attr(href)').get()
        if next_page is not None:
            # response.follow resolves relative URLs and re-enters parse.
            yield response.follow(next_page, self.parse)
您可以使用 css 选择器定位下一个按钮
a:nth-of-type(7)
和 nth-child 选择器
a:nth-child(8)
您可以像下面这样工作:
import scrapy
class GameSpider(scrapy.Spider):
    """Scrape card name/price rows and paginate via the next-page link."""

    name = 'game_spider'
    start_urls = ['http://www.starcitygames.com/catalog/category/10th%20Edition']

    def parse(self, response):
        """Yield a dict per catalog row, then follow pagination if present."""
        rows = response.css("tr.deckdbbody_row")
        for row in rows:
            card_name = row.css("a.card_popup::text").get()
            card_price = row.css(".search_results_9::text").get()
            yield {'name': card_name, 'price': card_price}

        # The 8th anchor in the div after the results table is "next page".
        next_href = response.css('table+ div a:nth-child(8)::attr(href)').get()
        if next_href is not None:
            yield response.follow(next_href, callback=self.parse)