Unable to use proxies one by one until there is a valid response
I've written a script in Python's scrapy to make a proxied request using any of the newly generated proxies from the get_proxies() method. I used the requests module to fetch the proxies in order to reuse them in the script. However, the problem is that the proxy my script picks may not always be a good one, so sometimes it fails to fetch a valid response.
How can I let my script keep trying with different proxies until there is a valid response?
My script so far:
import scrapy
import random
import requests
from itertools import cycle
from bs4 import BeautifulSoup
from scrapy.http.request import Request
from scrapy.crawler import CrawlerProcess

class ProxySpider(scrapy.Spider):
    name = "sslproxies"
    check_url = "https://whosebug.com/questions/tagged/web-scraping"
    proxy_link = "https://www.sslproxies.org/"

    def start_requests(self):
        proxylist = self.get_proxies()
        random.shuffle(proxylist)
        proxy_ip_port = next(cycle(proxylist))
        print(proxy_ip_port)  # Checking out the proxy address
        request = scrapy.Request(self.check_url, callback=self.parse, errback=self.errback_httpbin, dont_filter=True)
        request.meta['proxy'] = "http://{}".format(proxy_ip_port)
        yield request

    def get_proxies(self):
        response = requests.get(self.proxy_link)
        soup = BeautifulSoup(response.text, "lxml")
        proxy = [':'.join([item.select_one("td").text, item.select_one("td:nth-of-type(2)").text]) for item in soup.select("table.table tbody tr") if "yes" in item.text]
        return proxy

    def parse(self, response):
        print(response.meta.get("proxy"))  # Compare this to the earlier one whether they both are the same

    def errback_httpbin(self, failure):
        print("Failure: " + str(failure))

if __name__ == "__main__":
    c = CrawlerProcess({
        'USER_AGENT': 'Mozilla/5.0',
        'DOWNLOAD_TIMEOUT': 5,
    })
    c.crawl(ProxySpider)
    c.start()
PS: My intention is to seek a solution along the lines of what I've started here.
You need to write a downloader middleware that installs a process_exception hook, which scrapy calls when an exception is raised. In the hook you can return a new Request object with the dont_filter=True flag, so scrapy keeps rescheduling the request until it succeeds.
Meanwhile, you can verify responses more thoroughly in a process_response hook: check the status code, the response content, and so on, and reschedule the request as needed.
To change the proxy easily, you should use the built-in HttpProxyMiddleware rather than fiddling with the environment:
request.meta['proxy'] = proxy_address
Take this project as an example.
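To make the idea above concrete, here is a minimal sketch of such a downloader middleware. It is only an illustration, not tested production code: the class name, the PROXY_LIST setting and the "retry on any non-200 status" rule are assumptions of mine, and the sketch retries indefinitely, whereas in practice you would cap the number of attempts.

import random

class RotatingProxyRetryMiddleware(object):
    """Hypothetical middleware: re-issues a request with a different proxy
    whenever the download raises an exception or the response looks bad."""

    def __init__(self, proxies):
        self.proxies = proxies

    @classmethod
    def from_crawler(cls, crawler):
        # PROXY_LIST is an assumed custom setting, e.g. ['1.2.3.4:8080', ...]
        return cls(crawler.settings.getlist('PROXY_LIST'))

    def _retry(self, request):
        # Copy the request with dont_filter=True so the dupefilter lets it
        # through again, and attach a freshly picked proxy for HttpProxyMiddleware.
        new_request = request.replace(dont_filter=True)
        new_request.meta['proxy'] = "http://{}".format(random.choice(self.proxies))
        return new_request

    def process_request(self, request, spider):
        # Give every outgoing request a proxy if it does not have one yet.
        if self.proxies and 'proxy' not in request.meta:
            request.meta['proxy'] = "http://{}".format(random.choice(self.proxies))

    def process_response(self, request, response, spider):
        # Validate the response here; a non-200 status is treated as a bad
        # proxy and the request is rescheduled with another one.
        if response.status != 200 and self.proxies:
            return self._retry(request)
        return response

    def process_exception(self, request, exception, spider):
        # Called on timeouts, connection errors and the like; returning a
        # Request makes scrapy schedule it again.
        if self.proxies:
            return self._retry(request)

You would then enable it via the DOWNLOADER_MIDDLEWARES setting and put your "ip:port" strings into the assumed PROXY_LIST setting.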
As you know, an HTTP response needs to pass through all the middlewares before it reaches the spider methods.
It means that only requests with valid proxies can proceed to the spider callback functions.
In order to use valid proxies we need to check ALL the proxies first, and after that choose only from the valid ones.
When our previously chosen proxy stops working, we mark it as not valid and pick a new one from the remaining valid proxies in the spider's errback.
import scrapy
from scrapy.crawler import CrawlerProcess
from scrapy.http.request import Request

class ProxySpider(scrapy.Spider):
    name = "sslproxies"
    check_url = "https://whosebug.com/questions/tagged/web-scraping"
    proxy_link = "https://www.sslproxies.org/"
    current_proxy = ""
    proxies = {}

    def start_requests(self):
        yield Request(self.proxy_link, callback=self.parse_proxies)

    def parse_proxies(self, response):
        for row in response.css("table#proxylisttable tbody tr"):
            if "yes" in row.extract():
                td = row.css("td::text").extract()
                self.proxies["http://{}".format(td[0] + ":" + td[1])] = {"valid": False}

        for proxy in self.proxies.keys():
            yield Request(self.check_url, callback=self.parse, errback=self.errback_httpbin,
                          meta={"proxy": proxy,
                                "download_slot": proxy},
                          dont_filter=True)

    def parse(self, response):
        if "proxy" in response.request.meta.keys():
            # As script reaches this parse method we can mark current proxy as valid
            self.proxies[response.request.meta["proxy"]]["valid"] = True
            print(response.meta.get("proxy"))
            if not self.current_proxy:
                # Scraper reaches this code line on first valid response
                self.current_proxy = response.request.meta["proxy"]
                # yield Request(next_url, callback=self.parse_next,
                #               meta={"proxy": self.current_proxy,
                #                     "download_slot": self.current_proxy})

    def errback_httpbin(self, failure):
        if "proxy" in failure.request.meta.keys():
            proxy = failure.request.meta["proxy"]
            if proxy == self.current_proxy:
                # If current proxy after our usage becomes not valid
                # Mark it as not valid
                self.proxies[proxy]["valid"] = False
                for ip_port in self.proxies.keys():
                    # And choose valid proxy from self.proxies
                    if self.proxies[ip_port]["valid"]:
                        failure.request.meta["proxy"] = ip_port
                        failure.request.meta["download_slot"] = ip_port
                        self.current_proxy = ip_port
                        return failure.request
        print("Failure: " + str(failure))

if __name__ == "__main__":
    c = CrawlerProcess({
        'USER_AGENT': 'Mozilla/5.0',
        'COOKIES_ENABLED': False,
        'DOWNLOAD_TIMEOUT': 10,
        'DOWNLOAD_DELAY': 3,
    })
    c.crawl(ProxySpider)
    c.start()
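If you then want to keep scraping through the proxy that passed the check, the commented-out lines in parse() show the intended continuation: send every follow-up request with self.current_proxy as both proxy and download_slot, so that a later failure comes back through errback_httpbin and triggers another proxy switch. A hypothetical sketch of such helpers inside the same ProxySpider (proxied_request and parse_next are my placeholders, not part of the answer above):

    def proxied_request(self, url, callback):
        # Build a follow-up request that reuses the currently validated proxy
        return Request(url, callback=callback, errback=self.errback_httpbin,
                       meta={"proxy": self.current_proxy,
                             "download_slot": self.current_proxy})

    def parse_next(self, response):
        # Placeholder callback for the pages you actually want to scrape
        self.logger.info("Fetched %s via %s", response.url, response.meta.get("proxy"))
        # e.g. yield self.proxied_request(some_other_url, self.parse_next)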