Scrapy: crawl 1 level deep on offsite links
In Scrapy, how would I get the crawler to go only 1 level deep on links that lead outside the allowed domains? During the crawl I want to verify that every outbound link on the site is valid and doesn't 404, but I don't want it to crawl the entire site behind each non-allowed domain. I already handle 404s on the allowed domains. I know I could set DEPTH_LIMIT to 1, but that would affect the allowed domains as well.
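(For reference, DEPTH_LIMIT is a plain project-wide setting, which is why it can't distinguish onsite from offsite links; a minimal settings.py illustration, not from my project:)
# settings.py -- DEPTH_LIMIT caps the depth of every scheduled request,
# so a value of 1 would also stop the crawl one hop from the start URLs
# inside the allowed domains.
DEPTH_LIMIT = 1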
My code:
from scrapy.selector import Selector
from scrapy.spiders import CrawlSpider, Rule
from scrapy.linkextractors import LinkExtractor
from smcrawl.items import Website
import smcrawl.util


def iterate(lists):
    for a in lists:
        return a


class WalmartSpider(CrawlSpider):
    handle_httpstatus_list = [200, 302, 404, 500, 502]
    name = "surveymonkeycouk"
    allowed_domains = ["surveymonkey.co.uk", "surveymonkey.com"]
    start_urls = ['https://www.surveymonkey.co.uk/']

    rules = (
        Rule(
            LinkExtractor(
                allow=(),
                deny=(),
                process_value=smcrawl.util.trim),
            callback="parse_items",
            follow=True,),
    )
    #process_links=lambda links: [link for link in links if not link.nofollow] = filter nofollow links

    #parses start urls
    def parse_start_url(self, response):
        list(self.parse_items(response))

    def parse_items(self, response):
        hxs = Selector(response)
        sites = response.selector.xpath('//html')
        items = []
        for site in sites:
            if response.status == 404:
                item = Website()
                item['url'] = response.url
                item['referer'] = response.request.headers.get('Referer')
                item['canonical'] = site.xpath('//head/link[@rel="canonical"]/@href').extract()
                item['robots'] = site.xpath('//meta[@name="robots"]/@content').extract()
                item['original_url'] = response.meta.get('redirect_urls', [response.url])[0]
                item['description'] = site.xpath('//meta[@name="description"]/@content').extract()
                item['redirect'] = response.status
            elif response.status == 200:
                item = Website()
                item['url'] = response.url
                item['referer'] = response.request.headers.get('Referer')
                item['canonical'] = site.xpath('//head/link[@rel="canonical"]/@href').extract()
                item['robots'] = site.xpath('//meta[@name="robots"]/@content').extract()
                item['original_url'] = response.meta.get('redirect_urls', [response.url])[0]
                item['description'] = site.xpath('//meta[@name="description"]/@content').extract()
                item['redirect'] = response.status
                titles = site.xpath('/html/head/title/text()').extract()
                try:
                    titles = iterate(titles)
                    titles = titles.strip()
                except:
                    pass
                item['title'] = titles
                h1 = site.xpath('//h1/text()').extract()
                try:
                    h1 = iterate(h1)
                    h1 = h1.strip()
                except:
                    pass
                item['h1'] = h1
            elif response.status == 302:
                item = Website()
                item['url'] = response.url
                item['referer'] = response.request.headers.get('Referer')
                item['canonical'] = site.xpath('//head/link[@rel="canonical"]/@href').extract()
                item['robots'] = site.xpath('//meta[@name="robots"]/@content').extract()
                item['original_url'] = response.meta.get('redirect_urls', [response.url])[0]
                item['description'] = site.xpath('//meta[@name="description"]/@content').extract()
                item['redirect'] = response.status
                titles = site.xpath('/html/head/title/text()').extract()
                try:
                    titles = iterate(titles)
                    titles = titles.strip()
                except:
                    pass
                item['title'] = titles
                h1 = site.xpath('//h1/text()').extract()
                try:
                    h1 = iterate(h1)
                    h1 = h1.strip()
                except:
                    pass
                item['h1'] = h1
            elif response.status == 404:
                item = Website()
                item['url'] = response.url
                item['referer'] = response.request.headers.get('Referer')
                item['canonical'] = site.xpath('//head/link[@rel="canonical"]/@href').extract()
                item['robots'] = site.xpath('//meta[@name="robots"]/@content').extract()
                item['original_url'] = response.meta.get('redirect_urls', [response.url])[0]
                item['description'] = site.xpath('//meta[@name="description"]/@content').extract()
                item['redirect'] = response.status
                titles = site.xpath('/html/head/title/text()').extract()
                try:
                    titles = iterate(titles)
                    titles = titles.strip()
                except:
                    pass
                item['title'] = titles
                h1 = site.xpath('//h1/text()').extract()
                try:
                    h1 = iterate(h1)
                    h1 = h1.strip()
                except:
                    pass
                item['h1'] = h1
            elif response.status == 500:
                item = Website()
                item['url'] = response.url
                item['referer'] = response.request.headers.get('Referer')
                item['canonical'] = site.xpath('//head/link[@rel="canonical"]/@href').extract()
                item['robots'] = site.xpath('//meta[@name="robots"]/@content').extract()
                item['original_url'] = response.meta.get('redirect_urls', [response.url])[0]
                item['description'] = site.xpath('//meta[@name="description"]/@content').extract()
                item['redirect'] = response.status
                titles = site.xpath('/html/head/title/text()').extract()
                try:
                    titles = iterate(titles)
                    titles = titles.strip()
                except:
                    pass
                item['title'] = titles
                h1 = site.xpath('//h1/text()').extract()
                try:
                    h1 = iterate(h1)
                    h1 = h1.strip()
                except:
                    pass
                item['h1'] = h1
            elif response.status == 502:
                item = Website()
                item['url'] = response.url
                item['referer'] = response.request.headers.get('Referer')
                item['canonical'] = site.xpath('//head/link[@rel="canonical"]/@href').extract()
                item['robots'] = site.xpath('//meta[@name="robots"]/@content').extract()
                item['original_url'] = response.meta.get('redirect_urls', [response.url])[0]
                item['description'] = site.xpath('//meta[@name="description"]/@content').extract()
                item['redirect'] = response.status
                titles = site.xpath('/html/head/title/text()').extract()
                try:
                    titles = iterate(titles)
                    titles = titles.strip()
                except:
                    pass
                item['title'] = titles
                h1 = site.xpath('//h1/text()').extract()
                try:
                    h1 = iterate(h1)
                    h1 = h1.strip()
                except:
                    pass
                item['h1'] = h1
            else:
                item = Website()
                item['url'] = response.url
                item['referer'] = response.request.headers.get('Referer')
                item['canonical'] = site.xpath('//head/link[@rel="canonical"]/@href').extract()
                item['robots'] = site.xpath('//meta[@name="robots"]/@content').extract()
                item['original_url'] = response.meta.get('redirect_urls', [response.url])[0]
                item['description'] = site.xpath('//meta[@name="description"]/@content').extract()
                item['redirect'] = response.status
                titles = site.xpath('/html/head/title/text()').extract()
                try:
                    titles = iterate(titles)
                    titles = titles.strip()
                except:
                    pass
                item['title'] = titles
                h1 = site.xpath('//h1/text()').extract()
                try:
                    h1 = iterate(h1)
                    h1 = h1.strip()
                except:
                    pass
                item['h1'] = h1
            items.append(item)
        return items
OK, one thing you could do is avoid using allowed_domains, so you don't filter out any offsite requests. But to make it more interesting, you could create your own OffsiteMiddleware, something like this:
from scrapy.spidermiddlewares.offsite import OffsiteMiddleware
from scrapy.utils.httpobj import urlparse_cached


class MyOffsiteMiddleware(OffsiteMiddleware):
    offsite_domains = set()

    def should_follow(self, request, spider):
        regex = self.host_regex
        host = urlparse_cached(request).hostname or ''
        if host in self.offsite_domains:
            # This offsite host has already been requested once; drop it.
            return False
        if not bool(regex.search(host)):
            # First hit on an offsite host: remember it, but let this one through.
            self.offsite_domains.add(host)
        return True
I haven't tested it, but it should work. Remember that you need to disable the default middleware and enable yours in the settings:
SPIDER_MIDDLEWARES = {
    'myproject.middlewares.MyOffsiteMiddleware': 543,
    'scrapy.spidermiddlewares.offsite.OffsiteMiddleware': None,
}
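An untested sketch of how this could plug into a spider (my own illustration, not part of the answer): the swap can also be scoped to one spider via custom_settings, and handle_httpstatus_list is needed so 404/5xx responses from offsite hosts actually reach the callback. The module path myproject.middlewares and the spider name below are placeholders.
from scrapy.spiders import CrawlSpider, Rule
from scrapy.linkextractors import LinkExtractor


class OutboundCheckSpider(CrawlSpider):
    name = "outbound_check"  # placeholder name
    allowed_domains = ["surveymonkey.co.uk", "surveymonkey.com"]
    start_urls = ["https://www.surveymonkey.co.uk/"]
    # Let error statuses reach the callback instead of being filtered out.
    handle_httpstatus_list = [200, 302, 404, 500, 502]

    # Enable the custom offsite filter for this spider only;
    # 'myproject.middlewares' is a placeholder module path.
    custom_settings = {
        "SPIDER_MIDDLEWARES": {
            "myproject.middlewares.MyOffsiteMiddleware": 543,
            "scrapy.spidermiddlewares.offsite.OffsiteMiddleware": None,
        },
    }

    rules = (
        Rule(LinkExtractor(), callback="parse_items", follow=True),
    )

    def parse_items(self, response):
        # Each offsite host is requested once, so its status shows up here.
        yield {"url": response.url, "status": response.status}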
I'm marking this as the answer. It's slightly different from the solution I was looking for, but with a whitelist of the URLs I'm willing to crawl, the end result is the same. Thanks!
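The whitelist variant described in that comment isn't shown in the thread; a rough, untested sketch of what it might look like (the whitelisted_hosts set is made up for illustration) is:
from scrapy.spidermiddlewares.offsite import OffsiteMiddleware
from scrapy.utils.httpobj import urlparse_cached


class WhitelistOffsiteMiddleware(OffsiteMiddleware):
    # Hypothetical hosts; in practice these would come from a setting or file.
    whitelisted_hosts = {"example.com", "partner.example.org"}

    def should_follow(self, request, spider):
        host = urlparse_cached(request).hostname or ''
        if self.host_regex.search(host):
            # Inside allowed_domains: crawl normally.
            return True
        # Offsite: only follow hosts that are explicitly whitelisted.
        return host in self.whitelisted_hosts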