Common Pipeline Patterns
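Two files carry the pattern: pipelines.py, which defines a JSON-lines writer and a MongoDB writer, and settings.py, which registers them and supplies their configuration. First, pipelines.py: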

# -*- coding: utf-8 -*-

# Define your item pipelines here
#
# Don't forget to add your pipeline to the ITEM_PIPELINES setting
# See: http://doc.scrapy.org/en/latest/topics/item-pipeline.html
import json
import codecs
import pymongo
from datetime import datetime


class JsonWriterPipeline(object):

    def __init__(self, output_path):
        self.output_path = output_path

    @classmethod
    def from_crawler(cls, crawler):
        return cls(
            output_path=crawler.settings.get('OUTPUT_DIR'),
        )

    def open_spider(self, spider):
        # One JSON-lines file per day, e.g. output/2018-07-20.json
        file_path = self.output_path + datetime.strftime(datetime.now(), "%Y-%m-%d") + '.json'
        self.file = codecs.open(file_path, 'w', encoding='utf-8')

    def close_spider(self, spider):
        self.file.close()

    def process_item(self, item, spider):
        # ensure_ascii=False keeps non-ASCII text readable in the output file
        line = json.dumps(dict(item), ensure_ascii=False) + "\n"
        self.file.write(line)
        return item


class MongoPipeline(object):
    collection_name = 'phones'

    def __init__(self, mongo_uri, mongo_db):
        self.mongo_uri = mongo_uri
        self.mongo_db = mongo_db

    @classmethod
    def from_crawler(cls, crawler):
        return cls(
            mongo_uri=crawler.settings.get('MONGO_URI'),
            mongo_db=crawler.settings.get('MONGO_DATABASE')
        )

    def open_spider(self, spider):
        # One MongoDB client per spider run; the database and collection are created lazily
        self.client = pymongo.MongoClient(self.mongo_uri)
        self.db = self.client[self.mongo_db]

    def close_spider(self, spider):
        self.client.close()

    def process_item(self, item, spider):
        self.db[self.collection_name].insert_one(dict(item))
        return item
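
Both pipelines work on any Item whose fields serialize cleanly to JSON/BSON. As a hypothetical sketch (PhoneItem, JdPhoneSpider, and the field names below are illustrative, not from the original project), items could be produced like this:

import scrapy


class PhoneItem(scrapy.Item):
    # Illustrative fields only; the real project defines its own
    name = scrapy.Field()
    price = scrapy.Field()


class JdPhoneSpider(scrapy.Spider):
    name = 'jd_phone'
    start_urls = ['https://example.com/phones']  # placeholder URL

    def parse(self, response):
        for row in response.css('li.phone'):
            yield PhoneItem(
                name=row.css('a::text').extract_first(),
                price=row.css('.price::text').extract_first(),
            )

Next, settings.py, which wires both pipelines in through ITEM_PIPELINES (lower numbers run first, so each item is written to the JSON file before it is inserted into MongoDB) and provides the OUTPUT_DIR, MONGO_URI, and MONGO_DATABASE values that each from_crawler reads: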

# -*- coding: utf-8 -*-

# Scrapy settings for jd_phone_model project
#
# For simplicity, this file contains only settings considered important or
# commonly used. You can find more settings consulting the documentation:
#
#     http://doc.scrapy.org/en/latest/topics/settings.html
#     http://scrapy.readthedocs.org/en/latest/topics/downloader-middleware.html
#     http://scrapy.readthedocs.org/en/latest/topics/spider-middleware.html
import os
from datetime import datetime

# True: project in development (not yet released); False: released to production
__PROJECT_DONT_PUBLISH__ = True

ROOT_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))

BOT_NAME = 'jd_phone_model'

SPIDER_MODULES = ['jd_phone_model.spiders']
NEWSPIDER_MODULE = 'jd_phone_model.spiders'

# Obey robots.txt rules
ROBOTSTXT_OBEY = False

# Configure maximum concurrent requests performed by Scrapy (default: 16)
# CONCURRENT_REQUESTS = 32

# Configure a delay for requests for the same website (default: 0)
# See http://scrapy.readthedocs.org/en/latest/topics/settings.html#download-delay
# See also autothrottle settings and docs
# DOWNLOAD_DELAY = 3
# The download delay setting will honor only one of:
# CONCURRENT_REQUESTS_PER_DOMAIN = 16
# CONCURRENT_REQUESTS_PER_IP = 16

# Disable cookies (enabled by default)
# COOKIES_ENABLED = False

# Disable Telnet Console (enabled by default)
# TELNETCONSOLE_ENABLED = False

# Override the default request headers:
DEFAULT_REQUEST_HEADERS = {
    'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/67.0.3396.99 Safari/537.36'
}

# Enable or disable spider middlewares
# See http://scrapy.readthedocs.org/en/latest/topics/spider-middleware.html
# SPIDER_MIDDLEWARES = {
#    'jd_phone_model.middlewares.JdPhoneModelSpiderMiddleware': 543,
# }

# Enable or disable downloader middlewares
# See http://scrapy.readthedocs.org/en/latest/topics/downloader-middleware.html
# DOWNLOADER_MIDDLEWARES = {
#    'jd_phone_model.middlewares.MyCustomDownloaderMiddleware': 543,
# }

# Enable or disable extensions
# See http://scrapy.readthedocs.org/en/latest/topics/extensions.html
# EXTENSIONS = {
#    'scrapy.extensions.telnet.TelnetConsole': None,
# }

# Configure item pipelines
# See http://scrapy.readthedocs.org/en/latest/topics/item-pipeline.html
ITEM_PIPELINES = {
    'jd_phone_model.pipelines.JsonWriterPipeline': 300,
    'jd_phone_model.pipelines.MongoPipeline': 301,
}

# Enable and configure the AutoThrottle extension (disabled by default)
# See http://doc.scrapy.org/en/latest/topics/autothrottle.html
# AUTOTHROTTLE_ENABLED = True
# The initial download delay
# AUTOTHROTTLE_START_DELAY = 5
# The maximum download delay to be set in case of high latencies
# AUTOTHROTTLE_MAX_DELAY = 60
# The average number of requests Scrapy should be sending in parallel to
# each remote server
# AUTOTHROTTLE_TARGET_CONCURRENCY = 1.0
# Enable showing throttling stats for every response received:
# AUTOTHROTTLE_DEBUG = False

# Enable and configure HTTP caching (disabled by default)
# See http://scrapy.readthedocs.org/en/latest/topics/downloader-middleware.html#httpcache-middleware-settings
# HTTPCACHE_ENABLED = True
# HTTPCACHE_EXPIRATION_SECS = 0
# HTTPCACHE_DIR = 'httpcache'
# HTTPCACHE_IGNORE_HTTP_CODES = []
# HTTPCACHE_STORAGE = 'scrapy.extensions.httpcache.FilesystemCacheStorage'

LOG_DIR = ROOT_DIR + '/logs/'
if not os.path.exists(LOG_DIR):
    os.makedirs(LOG_DIR)

OUTPUT_DIR = ROOT_DIR + '/output/'
if not os.path.exists(OUTPUT_DIR):
    os.makedirs(OUTPUT_DIR)

if not __PROJECT_DONT_PUBLISH__:
    # Production (released) mode
    LOG_LEVEL = 'DEBUG'

    log_d = '/data/logs/' + BOT_NAME + '/'
    if not os.path.exists(log_d):
        os.makedirs(log_d)
    LOG_FILE = log_d + 'DEBUG_%s.txt' % (datetime.now().strftime('%Y%m%d_%H%M_%S'))

    MONGO_URI = "mongodb://XXXX:27017"
    MONGO_DATABASE = 'jd_phone_model'
else:
    # Development / test mode
    MONGO_URI = "mongodb://127.0.0.1:27017"
    MONGO_DATABASE = 'jd_phone_model'

MYSQL_HOST = 'XXXX'
MYSQL_USER = 'crawler'
MYSQL_PASSWORD = 'pveLnmEzoGEJ9Cc'
MYSQL_DATABASE = 'crawler'
MYSQL_PORT = 3307
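
The MYSQL_* settings above are not consumed by either pipeline shown. A minimal sketch of a pipeline that would read them, assuming pymysql is installed (the phones table and its name/price columns are hypothetical):

import pymysql


class MysqlPipeline(object):

    def __init__(self, host, user, password, database, port):
        self.host = host
        self.user = user
        self.password = password
        self.database = database
        self.port = port

    @classmethod
    def from_crawler(cls, crawler):
        s = crawler.settings
        return cls(
            host=s.get('MYSQL_HOST'),
            user=s.get('MYSQL_USER'),
            password=s.get('MYSQL_PASSWORD'),
            database=s.get('MYSQL_DATABASE'),
            port=s.getint('MYSQL_PORT'),
        )

    def open_spider(self, spider):
        # One connection per spider run
        self.conn = pymysql.connect(
            host=self.host, user=self.user, password=self.password,
            database=self.database, port=self.port, charset='utf8mb4')

    def close_spider(self, spider):
        self.conn.close()

    def process_item(self, item, spider):
        # The target table/columns here are hypothetical examples
        with self.conn.cursor() as cursor:
            cursor.execute(
                "INSERT INTO phones (name, price) VALUES (%s, %s)",
                (item.get('name'), item.get('price')))
        self.conn.commit()
        return item

To enable it, it would be added to ITEM_PIPELINES alongside the other two, e.g. 'jd_phone_model.pipelines.MysqlPipeline': 302.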
