Scraping multiple webpages and results are being outputted out of order
I am scraping 3 URLs, each of which has multiple pages. The first 2 links have 2 pages and the 3rd has 3 pages. However, when I scrape them, it scrapes page 1 of URL 1, then page 1 of URL 2 instead of page 2 of URL 1. Then it goes back and scrapes page 2 of URL 1 and page 2 of URL 2, and then scrapes all three pages of URL 3 in order. So why doesn't it scrape them in order, i.e. pages 1 and 2 of URL 1; pages 1 and 2 of URL 2; then pages 1, 2 and 3 of URL 3? Is there a way to fix this?
SplashSpider.py
import csv
from scrapy.spiders import Spider
from scrapy_splash import SplashRequest
from ..items import GameItem
# process the csv file so the url + ip address + useragent pairs are the same as defined in the file
# returns a list of dictionaries, example:
# [ {'url': 'http://www.starcitygames.com/catalog/category/Rivals%20of%20Ixalan',
# 'ip': 'http://204.152.114.244:8050',
# 'ua': "Mozilla/5.0 (BlackBerry; U; BlackBerry 9320; en-GB) AppleWebKit/534.11"},
# ...
# ]
def process_csv(csv_file):
    data = []
    reader = csv.reader(csv_file)
    next(reader)
    for fields in reader:
        if fields[0] != "":
            url = fields[0]
        else:
            continue  # skip the whole row if the url column is empty
        if fields[1] != "":
            ip = "http://" + fields[1] + ":8050"  # adding http and port because this is the needed scheme
        if fields[2] != "":
            useragent = fields[2]
        data.append({"url": url, "ip": ip, "ua": useragent})
    return data


class MySpider(Spider):
    name = 'splash_spider'  # Name of Spider

    # notice that we don't need to define start_urls
    # just make sure to get all the urls you want to scrape inside start_requests function
    # getting all the url + ip address + useragent pairs then request them
    def start_requests(self):
        # get the file path of the csv file that contains the pairs from the settings.py
        with open(self.settings["PROXY_CSV_FILE"], mode="r") as csv_file:
            # requests is a list of dictionaries like this -> {url: str, ua: str, ip: str}
            requests = process_csv(csv_file)

        for req in requests:
            # no need to create custom middlewares
            # just pass useragent using the headers param, and pass proxy using the meta param
            yield SplashRequest(url=req["url"], callback=self.parse, args={"wait": 3},
                                headers={"User-Agent": req["ua"]},
                                splash_url=req["ip"],
                                )

    # Scraping
    def parse(self, response):
        item = GameItem()
        saved_name = ""
        item["Category"] = response.css("span.titletext::text").extract()
        for game in response.css("tr[class^=deckdbbody]"):
            saved_name = game.css("a.card_popup::text").extract_first() or saved_name
            item["card_name"] = saved_name.strip()
            if item["card_name"] is not None:
                saved_name = item["card_name"].strip()
            else:
                item["card_name"] = saved_name
            item["Condition"] = game.css("td[class^=deckdbbody].search_results_7 a::text").get()
            item["stock"] = game.css("td[class^=deckdbbody].search_results_8::text").extract_first()
            item["Price"] = game.css("td[class^=deckdbbody].search_results_9::text").extract_first()
            yield item

        next_page = response.xpath('//a[contains(., "- Next>>")]/@href').get()
        if next_page is not None:
            yield response.follow(next_page, self.parse)
CSV file that stores the URLs
http://www.starcitygames.com/catalog/category/Duel%20Decks%20Venser%20vs%20Koth,204.152.114.229,Mozilla/5.0 (X11; U; Linux i686; en-US; rv:1.9a1) Gecko/20070308 Minefield/3.0a1
http://www.starcitygames.com/catalog/category/Duel%20Decks%20Zendikar%20vs%20Eldrazi,,
http://www.starcitygames.com/catalog/category/Duels%20of%20the%20Planeswalkers,,
It is probably because of concurrency.
Try disabling concurrency by adding/changing the following line in settings.py:
CONCURRENT_REQUESTS = 1
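As a minimal sketch of where that line would go (CONCURRENT_REQUESTS is a real Scrapy setting; the BOT_NAME value here is just an assumed project name, adjust it to yours):

# settings.py -- sketch; only CONCURRENT_REQUESTS is relevant to this suggestion
BOT_NAME = "splash_spider"  # assumed project name

# Handle one request at a time so pagination requests for one URL are not
# interleaved with requests for the other URLs.
CONCURRENT_REQUESTS = 1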
Edit:
Oh, sorry, let's try another thing.
Do you know how the start_urls attribute works?
It is a special attribute of the Spider class to which you can assign a list of URLs, and it will run each one to the end.
It would work like this:
from scrapy.spiders import Spider

class MySpyder(Spider):
    name = 'MySpyder'
    start_urls = ['url1', 'url2']  # ... add the rest of your URLs here

    def parse(self, response):
        # do parse stuff
        ...
You can read your csv and turn it into a list of URLs.
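A minimal sketch of that idea, assuming the CSV layout shown above (a header row, then url,ip,useragent per line); urls_from_csv and the "urls.csv" path are illustrative names, not part of the original code:

import csv

from scrapy.spiders import Spider


def urls_from_csv(path):
    # Collect the first column (the URL) of every row, skipping the header
    # and any row whose url column is empty.
    with open(path, mode="r") as csv_file:
        reader = csv.reader(csv_file)
        next(reader)
        return [row[0] for row in reader if row and row[0] != ""]


class MySpider(Spider):
    name = 'splash_spider'
    start_urls = urls_from_csv("urls.csv")  # hypothetical path to the CSV above

    def parse(self, response):
        # do parse stuff
        ...

Note that this only uses the url column; the per-row proxy and user-agent would still have to be attached in start_requests with SplashRequest, as in the original spider.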