1. Enabling multiple pipelines
ITEM_PIPELINES = {
    # 'jingxi.pipelines.JingxiPipeline': 200,
    'jingxi.pipelines.BaiduPipeline': 300,
    'jingxi.pipelines.TencentPipeline': 100,
}
With multiple pipelines enabled, every item yielded by a spider flows through all of them. The order is determined by the number assigned to each pipeline: the smaller the number, the earlier that pipeline processes the item (here TencentPipeline at 100 runs before BaiduPipeline at 300).
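To make the flow concrete, here is a minimal hypothetical spider for the same jingxi project; the name and URL are placeholders, but each dict it yields enters TencentPipeline (100) first and BaiduPipeline (300) second:

import scrapy

class BaiduSpider(scrapy.Spider):
    name = 'baidu'                       # matched against spider.name in the pipelines below
    start_urls = ['https://www.baidu.com']

    def parse(self, response):
        # each yielded item travels through every enabled pipeline,
        # lowest number first
        yield {'title': response.css('title::text').get()}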
2. Telling different pipelines apart
class BaiduPipeline:
    def open_spider(self, spider):
        if spider.name != 'baidu':
            return
        print("Baidu spider started")

    def process_item(self, item, spider):
        if spider.name != 'baidu':
            return item  # not our spider: pass the item straight through
        print('&&&&&&&&&', spider.name, item)
        print('entered pipeline')
        print("write succeeded")
        return item

    def close_spider(self, spider):
        if spider.name != 'baidu':
            return
        print("Baidu spider finished")
class TencentPipeline:
    def open_spider(self, spider):
        if spider.name != 'tencent':
            return
        print("Tencent spider started")

    def process_item(self, item, spider):
        if spider.name != 'tencent':
            return item  # not our spider: pass the item straight through
        print('@@@@@@@@@@@@@', spider.name, item)
        print('entered pipeline')
        print("write succeeded")
        return item

    def close_spider(self, spider):
        if spider.name != 'tencent':
            return
        print("Tencent spider finished")
In short, spider.name distinguishes the spiders. When an item reaches a pipeline that does not belong to its spider, return it right away so that pipeline's work is skipped and the item keeps flowing down the chain; the target pipeline does its processing and likewise returns the item at the end. To stop an item from flowing any further, raise DropItem instead of returning it, as the sketch below shows.
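A minimal sketch of dropping items, using Scrapy's scrapy.exceptions.DropItem; the ValidatePipeline name and the missing-title condition are hypothetical:

from scrapy.exceptions import DropItem

class ValidatePipeline:
    def process_item(self, item, spider):
        # Hypothetical rule: discard items with no 'title' field.
        # Raising DropItem ends the item's journey; later pipelines never see it.
        if not item.get('title'):
            raise DropItem(f"missing title in {item!r}")
        return item  # valid items continue to the next pipeline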
3. Reading settings inside a pipeline
import pymongo

class MongoPipeline(object):
    collection_name = 'scrapy_items'

    def __init__(self, mongo_uri, mongo_db):
        self.mongo_uri = mongo_uri
        self.mongo_db = mongo_db

    @classmethod
    def from_crawler(cls, crawler):
        # crawler.settings exposes everything defined in settings.py
        return cls(
            mongo_uri=crawler.settings.get('MONGO_URI'),
            mongo_db=crawler.settings.get('MONGO_DATABASE', 'items'),
        )

    def open_spider(self, spider):
        self.client = pymongo.MongoClient(self.mongo_uri)
        self.db = self.client[self.mongo_db]

    def close_spider(self, spider):
        self.client.close()

    def process_item(self, item, spider):
        # insert_one replaces the deprecated Collection.insert
        self.db[self.collection_name].insert_one(dict(item))
        return item
Simple and direct: Scrapy calls the classmethod from_crawler when it builds the pipeline, and crawler.settings hands you everything defined in settings.py.
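For completeness, the matching settings.py entries might look like this; the URI, database name, and priority number are placeholder assumptions:

# settings.py -- values read by MongoPipeline.from_crawler above
MONGO_URI = 'mongodb://localhost:27017'   # placeholder connection string
MONGO_DATABASE = 'jingxi'                 # falls back to 'items' if omitted

ITEM_PIPELINES = {
    'jingxi.pipelines.MongoPipeline': 400,
}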