Getting some null outputs when scraping webpage using Scrapy
I'm trying to scrape the following webpage, http://www.starcitygames.com/catalog/category/1009?&start=0. For the most part I get the values I expect, but some values come back as null, and I can't figure out how to get rid of those nulls and get the actual values instead.
SplashSpider.py
import csv
from scrapy.spiders import Spider
from scrapy_splash import SplashRequest
from ..items import GameItem


def process_csv(csv_file):
    data = []
    reader = csv.reader(csv_file)
    next(reader)
    for fields in reader:
        if fields[0] != "":
            url = fields[0]
        else:
            continue  # skip the whole row if the url column is empty
        if fields[1] != "":
            ip = "http://" + fields[1] + ":8050"  # adding http and port because this is the needed scheme
        if fields[2] != "":
            useragent = fields[2]
        data.append({"url": url, "ip": ip, "ua": useragent})
    return data


class MySpider(Spider):
    name = 'splash_spider'  # Name of Spider

    # notice that we don't need to define start_urls
    # just make sure to get all the urls you want to scrape inside start_requests function
    # getting all the url + ip address + useragent pairs then request them
    def start_requests(self):
        # get the file path of the csv file that contains the pairs from the settings.py
        with open(self.settings["PROXY_CSV_FILE"], mode="r") as csv_file:
            # requests is a list of dictionaries like this -> {url: str, ua: str, ip: str}
            requests = process_csv(csv_file)
        for req in requests:
            # no need to create custom middlewares
            # just pass useragent using the headers param, and pass proxy using the meta param
            yield SplashRequest(url=req["url"], callback=self.parse, args={"wait": 3},
                                headers={"User-Agent": req["ua"]},
                                splash_url=req["ip"])

    # Scraping
    def parse(self, response):
        item = GameItem()
        for game in response.css("tr"):
            # Card Name
            yield {
                'card_name': game.css("a.card_popup::text").get(),
                'stock': game.css("td.deckdbbody.search_results_8::text").get(),
                'price': game.css("td.deckdbbody.search_results_9::text").get()
            }
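As a side note, process_csv reads its columns purely by position, so here is a quick sanity check of the layout it expects, assuming the CSV has a header row followed by url / proxy-ip / user-agent columns (the layout is inferred from the field indices above; the sample values are invented, and process_csv is the function from SplashSpider.py):

import io

sample = io.StringIO(
    "url,ip,user_agent\n"
    "http://www.starcitygames.com/catalog/category/1009?&start=0,"
    "127.0.0.1,Mozilla/5.0\n"
)
print(process_csv(sample))
# [{'url': 'http://www.starcitygames.com/catalog/category/1009?&start=0',
#   'ip': 'http://127.0.0.1:8050', 'ua': 'Mozilla/5.0'}]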
items.py
import scrapy

class GameItem(scrapy.Item):
    card_name = scrapy.Field()
    stock = scrapy.Field()
    price = scrapy.Field()
First, you only need to process the rows that actually contain card information and ignore all the others. Second, for some rows you need to remember the card name from an earlier row (current_card_name):
def parse(self, response):
    # item = GameItem()
    current_card_name = ""
    for card_row in response.xpath('//tr[starts-with(@class, "deckdbbody")]'):
        card_name = card_row.xpath('.//a[@class="card_popup"]/text()').extract_first()
        if not card_name:
            card_name = current_card_name
        else:
            current_card_name = card_name
        stock = card_row.xpath('.//td[contains(@class, "search_results_8")]/text()').extract_first()
        price = card_row.xpath('.//td[contains(@class, "search_results_9")]/text()').extract_first()
        yield {
            'card_name': card_name,
            'stock': stock,
            'price': price
        }
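To see why the nulls appeared in the first place: on listing pages like this, several condition/stock rows usually belong to one card, and only the first of those rows carries the card_popup link, so a per-row selector returns None for the follow-up rows. Here is a minimal, self-contained sketch of the carry-forward logic against stand-in HTML (the markup and values below are an assumption modeled on the selectors, not a copy of the real page):

from scrapy.selector import Selector

SAMPLE_HTML = """
<table>
  <tr class="deckdbbody_row">
    <td><a class="card_popup">Mox Opal</a></td>
    <td class="deckdbbody search_results_8">4</td>
    <td class="deckdbbody search_results_9">$99.99</td>
  </tr>
  <tr class="deckdbbody_row">
    <td></td>
    <td class="deckdbbody search_results_8">2</td>
    <td class="deckdbbody search_results_9">$79.99</td>
  </tr>
</table>
"""

current_card_name = ""
for card_row in Selector(text=SAMPLE_HTML).xpath('//tr[starts-with(@class, "deckdbbody")]'):
    card_name = card_row.xpath('.//a[@class="card_popup"]/text()').extract_first()
    if not card_name:
        card_name = current_card_name  # second row has no link, reuse the remembered name
    else:
        current_card_name = card_name
    stock = card_row.xpath('.//td[contains(@class, "search_results_8")]/text()').extract_first()
    price = card_row.xpath('.//td[contains(@class, "search_results_9")]/text()').extract_first()
    print(card_name, stock, price)
# prints:
# Mox Opal 4 $99.99
# Mox Opal 2 $79.99

The same pattern, remembering the last non-empty value while iterating, works for any column that is only printed once per group of rows.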