How to access data from one function in another function
I have a web scraping program, and in my spider I need information from my process_csv function, but I need to access it from my start_requests function. In process_csv I have a variable "ip" that takes the IPs from a CSV file, prepends http://, and appends a port number; the function then pairs them up with the user agents and the URLs that need to be scraped. Anyway, I need those IPs in my start_requests function, so I need to reference the variable "ip" from start_requests. How do I do that?
SplashSpider.py
import csv
from scrapy.spiders import Spider
from scrapy_splash import SplashRequest
from ..items import GameItem
# process the csv file so the url + ip address + useragent pairs are the same as defined in the file
# returns a list of dictionaries, example:
# [ {'url': 'http://www.starcitygames.com/catalog/category/Rivals%20of%20Ixalan',
# 'ip': 'http://204.152.114.244:8050',
# 'ua': "Mozilla/5.0 (BlackBerry; U; BlackBerry 9320; en-GB) AppleWebKit/534.11"},
# ...
# ]
def process_csv(csv_file):
data = []
reader = csv.reader(csv_file)
next(reader)  # skip the header row
for fields in reader:
if fields[0] != "":
url = fields[0]
else:
continue # skip the whole row if the url column is empty
if fields[1] != "":
ip = "http://" + fields[1] + ":8050" # adding http and port because this is the needed scheme
if fields[2] != "":
useragent = fields[2]
data.append({"url": url, "ip": ip, "ua": useragent})
return data
class MySpider(Spider):
name = 'splash_spider' # Name of Spider
# notice that we don't need to define start_urls
# just make sure to get all the urls you want to scrape inside start_requests function
# getting all the url + ip address + useragent pairs then request them
def start_requests(self):
# get the file path of the csv file that contains the pairs from the settings.py
with open(self.settings["PROXY_CSV_FILE"], mode="r") as csv_file:
# requests is a list of dictionaries like this -> {url: str, ua: str, ip: str}
requests = process_csv(csv_file)
for req in requests:
# no need to create custom middlewares
# just pass useragent using the headers param, and pass proxy using the meta param
yield SplashRequest(url=req["url"], callback=self.parse, args={"wait": 3},
splash_url=ip,  # "ip" is undefined in this scope -- this is the value I'm trying to access
headers={"User-Agent": req["ua"]},
meta={
"proxy": (req["ip"]),
})
# Scraping
# def parse(self, response):
# item = GameItem()
# for game in response.css("tr"):
# # Card Name
# item["card_name"] = game.css("a.card_popup::text").extract_first()
# yield item
In your process_csv function you append dictionaries of the form {"url": url, "ip": ip, "ua": useragent} to a list named data. So you can call the function, index into the list it returns, and each record gives you a dictionary from which you can read the ip. Example:
request_ip = process_csv(csv_file)[0]["ip"]
This returns the ip of the first record in the list. You can run a loop to get all the ips and store them in a list.
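A minimal sketch of that loop, assuming process_csv from your SplashSpider.py is importable and "proxies.csv" is just a placeholder for your actual CSV path:

# gather every proxy ip from the parsed CSV records
with open("proxies.csv", mode="r") as csv_file:  # placeholder path -- use your real file
    records = process_csv(csv_file)

ips = [record["ip"] for record in records]
# e.g. ['http://204.152.114.244:8050', ...]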
Hope this helps.