Each item pipeline component must implement the following method:

process_item(self, item, spider): called for every item pipeline component. It must either return the item (so that subsequent pipelines can process it) or raise a DropItem exception to discard the item.

In addition, a pipeline may implement the following methods:

| Method & Description | Parameters |
| --- | --- |
| open_spider(self, spider): Called when the spider is opened. | spider (Spider object): the spider that was opened. |
| close_spider(self, spider): Called when the spider is closed. | spider (Spider object): the spider that was closed. |
| from_crawler(cls, crawler): With the help of the crawler, the pipeline can access core components such as the signals and settings of Scrapy. | crawler (Crawler object): the crawler that uses this pipeline. |
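To make the call order concrete, here is a minimal skeleton (a sketch, not from the original text; the class name SkeletonPipeline is hypothetical) showing when each hook runs over a spider's lifetime:

```python
class SkeletonPipeline(object):
    @classmethod
    def from_crawler(cls, crawler):
        # Called first, to build the pipeline instance;
        # crawler.settings and crawler.signals are available here.
        return cls()

    def open_spider(self, spider):
        # Called once, when the spider is opened.
        pass

    def process_item(self, item, spider):
        # Called for every scraped item; return it or raise DropItem.
        return item

    def close_spider(self, spider):
        # Called once, when the spider is closed.
        pass
```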
The following example shows a PricePipeline that adjusts the price of items that exclude VAT and drops items with no price:

```python
from scrapy.exceptions import DropItem

class PricePipeline(object):
    vat = 2.25

    def process_item(self, item, spider):
        if item['price']:
            if item['excludes_vat']:
                item['price'] = item['price'] * self.vat
            return item
        else:
            raise DropItem("Missing price in %s" % item)
```
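A quick check of the behaviour (a sketch; the item dicts and the spider=None argument are placeholders for illustration):

```python
pipeline = PricePipeline()

item = {'price': 100.0, 'excludes_vat': True}
pipeline.process_item(item, spider=None)
# item['price'] is now 225.0 (100.0 * 2.25)

pipeline.process_item({'price': None}, spider=None)
# raises DropItem: "Missing price in {'price': None}"
```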
The following pipeline writes every scraped item to a single items.jl file, one JSON-serialized item per line (the JSON Lines format):

```python
import json

class JsonWriterPipeline(object):
    def __init__(self):
        # Open in text mode: json.dumps returns str, not bytes.
        self.file = open('items.jl', 'w')

    def process_item(self, item, spider):
        line = json.dumps(dict(item)) + "\n"
        self.file.write(line)
        return item
```
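A sketch of reading the file back, assuming it was produced by the pipeline above:

```python
import json

with open('items.jl') as f:
    items = [json.loads(line) for line in f]
```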
The following pipeline stores all scraped items in MongoDB. The server address and database name are taken from the Scrapy settings, and the collection is named after the collection_name class attribute:

```python
import pymongo

class MongoPipeline(object):
    collection_name = 'Scrapy_list'

    def __init__(self, mongo_uri, mongo_db):
        self.mongo_uri = mongo_uri
        self.mongo_db = mongo_db

    @classmethod
    def from_crawler(cls, crawler):
        return cls(
            mongo_uri=crawler.settings.get('MONGO_URI'),
            mongo_db=crawler.settings.get('MONGO_DB', 'lists')
        )

    def open_spider(self, spider):
        self.client = pymongo.MongoClient(self.mongo_uri)
        self.db = self.client[self.mongo_db]

    def close_spider(self, spider):
        self.client.close()

    def process_item(self, item, spider):
        # insert_one replaces Collection.insert, which was removed in PyMongo 4.
        self.db[self.collection_name].insert_one(dict(item))
        return item
```
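For the pipeline above to connect, the two settings it reads must be defined, e.g. in the project's settings.py (the URI below is a placeholder for a local MongoDB server):

```python
MONGO_URI = 'mongodb://localhost:27017'
MONGO_DB = 'lists'
```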
The following pipeline filters out duplicate items, assuming each scraped item carries a unique id:

```python
from scrapy.exceptions import DropItem

class DuplicatesPipeline(object):
    def __init__(self):
        self.ids_seen = set()

    def process_item(self, item, spider):
        if item['id'] in self.ids_seen:
            raise DropItem("Repeated items found: %s" % item)
        else:
            self.ids_seen.add(item['id'])
            return item
```
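A quick illustration of the filter (hypothetical items; spider=None is a placeholder):

```python
pipeline = DuplicatesPipeline()

pipeline.process_item({'id': 1}, spider=None)  # returned and remembered
pipeline.process_item({'id': 2}, spider=None)  # returned and remembered
pipeline.process_item({'id': 1}, spider=None)  # raises DropItem
```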
To activate the pipeline components, add them to the ITEM_PIPELINES setting:

```python
ITEM_PIPELINES = {
    'myproject.pipelines.PricePipeline': 100,
    'myproject.pipelines.JsonWriterPipeline': 600,
}
```

The integer values determine the order in which components run: items pass through lower-valued pipelines first. The values are customarily defined in the 0-1000 range.