Scraping URLs from an XML file and scraping those URLs
So I have declared an item named ArtscraperItem in my items.py file:
import scrapy


class ArtscraperItem(scrapy.Item):
    # define the fields for your item here like:
    # name = scrapy.Field()
    date = scrapy.Field()
    date_str = scrapy.Field()
    url = scrapy.Field()
    title = scrapy.Field()
    art_content = scrapy.Field()
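For reference, a scrapy.Item behaves like a dict that only accepts the fields declared above; assigning any other key raises a KeyError. A tiny illustration (the value here is made up):

item = ArtscraperItem()
item['title'] = 'Example headline'   # fine: 'title' is a declared field
# item['summary'] = '...'            # KeyError: ArtscraperItem does not support field: summary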
I am running this spider and it collects the data from the xml file. However, I also need to take the urls from the xml file and then scrape those urls to get each article's content and add it to the item as [art_content]. I have seen something similar on Stack Overflow, but the examples did not use a previously declared item, so I do not know how to apply them here. In short, I need to get the content from the urls I scrape and add it to the ArtscraperItem I create in the parse method.
Thanks in advance.
The method in question, and the second method parse_article, which should scrape the collected urls and return the article content:
from datetime import datetime as dt
import scrapy
from ArtScraper.items import ArtscraperItem


class PostSpider(scrapy.Spider):
    article = ""
    name = 'crawly'
    allowed_domains = ['bbc.com/arabic']
    start_urls = ['http://feeds.bbci.co.uk/arabic/rss.xml']

    def parse(self, response):
        articles = response.xpath('//channel/item')
        for article in articles:
            item = ArtscraperItem()
            item['date'] = dt.today()
            item['date_str'] = article.xpath('pubDate/text()').extract_first()
            item['url'] = article.xpath('link/text()').extract_first()
            item['title'] = article.xpath('title/text()').extract_first()
            url = item['url']
            yield scrapy.Request(url, callback=self.parse_article)
            yield item

    def parse_article(self, response):
        pars = response.xpath("//div[@class='story-body']/div[@class='story-body__inner']/p/text()").extract()
        article = '-'.join(pars)
        yield {
            'art_content': article
        }
My settings.py file:
#Settings.py
BOT_NAME = 'ArtScraper'

SPIDER_MODULES = ['ArtScraper.spiders']
NEWSPIDER_MODULE = 'ArtScraper.spiders'

# Crawl responsibly by identifying yourself (and your website) on the user-agent
#USER_AGENT = 'ArtScraper (+http://www.yourdomain.com)'

# Obey robots.txt rules
ROBOTSTXT_OBEY = True

# Configure maximum concurrent requests performed by Scrapy (default: 16)
#CONCURRENT_REQUESTS = 32

# Configure a delay for requests for the same website (default: 0)
# See https://doc.scrapy.org/en/latest/topics/settings.html#download-delay
# See also autothrottle settings and docs
DOWNLOAD_DELAY = .25
RANDOMIZE_DOWNLOAD_DELAY = True
# The download delay setting will honor only one of:
#CONCURRENT_REQUESTS_PER_DOMAIN = 16
#CONCURRENT_REQUESTS_PER_IP = 16

# Disable cookies (enabled by default)
#COOKIES_ENABLED = False

# Disable Telnet Console (enabled by default)
#TELNETCONSOLE_ENABLED = False

# Override the default request headers:
#DEFAULT_REQUEST_HEADERS = {
#   'Accept': 'text/html,application/xhtml+xml,application/xml;q=0.9,*/*;q=0.8',
#   'Accept-Language': 'en',
#}

# Enable or disable spider middlewares
# See https://doc.scrapy.org/en/latest/topics/spider-middleware.html
#SPIDER_MIDDLEWARES = {
#    'ArtScraper.middlewares.ArtscraperSpiderMiddleware': 543,
#}

# Enable or disable downloader middlewares
# See https://doc.scrapy.org/en/latest/topics/downloader-middleware.html
#DOWNLOADER_MIDDLEWARES = {
#    'ArtScraper.middlewares.ArtscraperDownloaderMiddleware': 543,
#}

# Enable or disable extensions
# See https://doc.scrapy.org/en/latest/topics/extensions.html
#EXTENSIONS = {
#    'scrapy.extensions.telnet.TelnetConsole': None,
#}

# Configure item pipelines
# See https://doc.scrapy.org/en/latest/topics/item-pipeline.html
ITEM_PIPELINES = {
    'ArtScraper.pipelines.MongoPipeline': 300,
}

MONGO_URI = 'localhost:27017'
MONGO_DATABASE = 'george'

# Enable and configure the AutoThrottle extension (disabled by default)
# See https://doc.scrapy.org/en/latest/topics/autothrottle.html
#AUTOTHROTTLE_ENABLED = True
# The initial download delay
#AUTOTHROTTLE_START_DELAY = 5
# The maximum download delay to be set in case of high latencies
#AUTOTHROTTLE_MAX_DELAY = 60
# The average number of requests Scrapy should be sending in parallel to
# each remote server
#AUTOTHROTTLE_TARGET_CONCURRENCY = 1.0
# Enable showing throttling stats for every response received:
#AUTOTHROTTLE_DEBUG = False

# Enable and configure HTTP caching (disabled by default)
# See https://doc.scrapy.org/en/latest/topics/downloader-middleware.html#httpcache-middleware-settings
#HTTPCACHE_ENABLED = True
#HTTPCACHE_EXPIRATION_SECS = 0
#HTTPCACHE_DIR = 'httpcache'
#HTTPCACHE_IGNORE_HTTP_CODES = []
#HTTPCACHE_STORAGE = 'scrapy.extensions.httpcache.FilesystemCacheStorage'
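As a side note, pymongo's MongoClient accepts either a bare 'host:port' string like the MONGO_URI above or a full connection URI, so an equivalent setting (assuming a local, unauthenticated MongoDB instance) would be:

MONGO_URI = 'mongodb://localhost:27017'
MONGO_DATABASE = 'george'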
My pipelines.py file:
import logging

import pymongo


class MongoPipeline(object):

    collection_name = 'articles'

    def __init__(self, mongo_uri, mongo_db):
        self.mongo_uri = mongo_uri
        self.mongo_db = mongo_db

    @classmethod
    def from_crawler(cls, crawler):
        ## pull in information from settings.py
        return cls(
            mongo_uri=crawler.settings.get('MONGO_URI'),
            mongo_db=crawler.settings.get('MONGO_DATABASE')
        )

    def open_spider(self, spider):
        ## initializing spider
        ## opening db connection
        self.client = pymongo.MongoClient(self.mongo_uri)
        self.db = self.client[self.mongo_db]

    def close_spider(self, spider):
        ## clean up when spider is closed
        self.client.close()

    def process_item(self, item, spider):
        ## how to handle each post
        self.db[self.collection_name].insert(dict(item))
        logging.debug("Post added to MongoDB")
        return item
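One thing worth flagging in passing: Collection.insert is deprecated in PyMongo 3.x and was removed in PyMongo 4.x, so on a recent driver process_item would need insert_one instead. A minimal sketch of that change:

    def process_item(self, item, spider):
        ## insert_one replaces the legacy insert call on newer PyMongo versions
        self.db[self.collection_name].insert_one(dict(item))
        logging.debug("Post added to MongoDB")
        return item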
This is called request chaining and meta carry-over.
You have 2 requests producing 1 item, so you need to:
- go to url A and collect some data
- go to url B and add more data
- return a single item containing the data from both A and B

To link these two steps you can use the Request.meta attribute, like this:
def parse(self, response):
    articles = response.xpath('//channel/item')
    for article in articles:
        item = ArtscraperItem()
        ...
        yield scrapy.Request(
            url,
            callback=self.parse_article,
            meta={'item': item},  # carry over our item
        )

def parse_article(self, response):
    # retrieve carried over item
    item = response.meta['item']
    pars = response.xpath("//div[@class='story-body']/div[@class='story-body__inner']/p/text()").extract()
    item['art_content'] = '-'.join(pars)
    yield item
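With this pattern the item is yielded only once, from parse_article, so the extra "yield item" in your original parse method should be dropped. On Scrapy 1.7+ the same carry-over can also be expressed with cb_kwargs, which passes the item to the callback as a named argument instead of going through meta; a sketch of that variant (same spider and fields assumed):

def parse(self, response):
    for article in response.xpath('//channel/item'):
        item = ArtscraperItem()
        item['url'] = article.xpath('link/text()').extract_first()
        # ... fill the remaining fields as before ...
        yield scrapy.Request(
            item['url'],
            callback=self.parse_article,
            cb_kwargs={'item': item},  # delivered to parse_article as a keyword argument
        )

def parse_article(self, response, item):
    pars = response.xpath("//div[@class='story-body']/div[@class='story-body__inner']/p/text()").extract()
    item['art_content'] = '-'.join(pars)
    yield item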