項(xiàng)目地址 BookSpider
介紹
本篇涉及的內(nèi)容主要是獲取分類下的所有圖書數(shù)據(jù)悯姊,并寫入MySQL
準(zhǔn)備
Python3.6卖毁、Scrapy揖曾、Twisted、MySQLdb等
演示
代碼
一亥啦、創(chuàng)建項(xiàng)目
scrapy startproject BookSpider #創(chuàng)建項(xiàng)目
scrapy genspider douban book.douban.com #創(chuàng)建豆瓣爬蟲
二炭剪、創(chuàng)建測試類(main.py)
from scrapy.cmdline import execute

if __name__ == '__main__':
    # Debug entry point: runs `scrapy crawl douban` in-process so the spider
    # can be launched (and breakpointed) from the IDE. The guard prevents the
    # crawl from firing if this module is ever imported.
    execute(['scrapy', 'crawl', 'douban'])
三、修改配置(BookSpider/settings.py,settings.py 位于項(xiàng)目目錄下,不在 spiders 目錄中)
USER_AGENT = 'Mozilla/5.0 (Windows NT 6.1; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/76.0.3809.100 Safari/537.36' #瀏覽器
ROBOTSTXT_OBEY = False #不遵循豆瓣網(wǎng)站的爬蟲協(xié)議
四翔脱、設(shè)置爬取的分類(spiders/douban.py)
start_urls = ['https://book.douban.com/tag/神經(jīng)網(wǎng)絡(luò)'] # 只測試爬取神經(jīng)網(wǎng)絡(luò)
五奴拦、獲取分類列表頁圖書數(shù)據(jù)
from scrapy.http import Request
from urllib.parse import urljoin
def parse(self, response):
    """Parse one tag listing page.

    Yields a detail-page Request per book (cover URL forwarded via meta so
    parse_book can attach it to the item), then follows the pagination link.
    """
    book_nodes = response.xpath('//div[@id="subject_list"]/ul/li/div[@class="pic"]/a')
    for node in book_nodes:
        url = node.xpath("@href").get()
        img_url = node.xpath('img/@src').get()
        if not url:
            # Defensive: a malformed entry without @href would otherwise make
            # Request(url=None) raise and abort the whole callback.
            continue
        yield Request(url=url, meta={"img_url": img_url}, callback=self.parse_book)
    # Follow the "next page" link, re-entering this callback until it is absent.
    next_url = response.xpath('//div[@class="paginator"]/span[@class="next"]/a/@href').get()
    if next_url:
        yield Request(url=urljoin(response.url, next_url), callback=self.parse)
六粱坤、定義數(shù)據(jù)模型(spiders/items.py)
class BookspiderItem(scrapy.Item):
    """One book scraped from a douban detail page."""

    name = scrapy.Field()          # book title
    author = scrapy.Field()
    publish = scrapy.Field()       # publisher name
    page_num = scrapy.Field()
    isbm = scrapy.Field()          # ISBN (field name kept as-is for DB column compatibility)
    binding = scrapy.Field()
    publish_date = scrapy.Field()
    price = scrapy.Field()
    rate = scrapy.Field()          # douban rating score
    img_url = scrapy.Field()       # list holding the cover image URL
    image_path = scrapy.Field()    # local path(s), filled in by ImageStorePipeline
七、獲取圖書詳情數(shù)據(jù)
import re
from BookSpider.items import BookspiderItem
def parse_book(self, response):
    """Parse a book detail page into a BookspiderItem.

    Every text node is fetched with a default of "" so that missing fields
    degrade to empty values instead of raising.
    """
    item = BookspiderItem()
    item['name'] = response.xpath('//span[@property="v:itemreviewed"]/text()').get("").strip()
    # "".split() yields [], so guard the [-1] indexing that crashed on pages
    # with no author link.
    author_parts = response.xpath('//span[contains(text(), "作者")]/following-sibling::a[1]/text()').get("").split()
    item['author'] = author_parts[-1] if author_parts else ''
    item['publish'] = response.xpath('//span[contains(text(), "出版社")]/following-sibling::text()').get("").strip()
    page_num = response.xpath('//span[contains(text(), "頁數(shù)")]/following-sibling::text()').get("").strip()
    item['page_num'] = page_num if page_num else 0
    item['isbm'] = response.xpath('//span[contains(text(), "ISBN")]/following-sibling::text()').get("").strip()
    item['binding'] = response.xpath('//span[contains(text(), "裝幀")]/following-sibling::text()').get("").strip()
    item['publish_date'] = response.xpath('//span[contains(text(), "出版年")]/following-sibling::text()').get("").strip()
    price = response.xpath('//span[contains(text(), "定價(jià)")]/following-sibling::text()').get("").strip()
    # Extract the first numeric token, e.g. "89.00元" -> "89.00". findall may be
    # empty even for non-empty text (e.g. "暫無"), so guard the [0] indexing too.
    price_tokens = re.findall(r'\d+\.?\d*', price)
    item['price'] = price_tokens[0] if price_tokens else ''
    item['rate'] = response.xpath('//div[contains(@class, "rating_self ")]/strong/text()').get("").strip()
    # ImagesPipeline expects a list of URLs; skip the entry entirely when the
    # listing page provided no cover, instead of shipping [None].
    cover_url = response.meta.get('img_url')
    item['img_url'] = [cover_url] if cover_url else []
    yield item
八瓷产、下載圖片
1站玄、創(chuàng)建images文件加
2、配置spiders/settings.py
ITEM_PIPELINES = {
'BookSpider.pipelines.ImageStorePipeline': 1, #后面的數(shù)據(jù)是優(yōu)先級
}
IMAGES_URLS_FIELD = "img_url"  # 必須與 item 中的字段名一致(原文寫成 image_url 是錯(cuò)誤的)
IMAGES_STORE = os.path.join(os.path.abspath(os.path.dirname(__file__)), 'images')  # 文件頂部需要 import os
3濒旦、創(chuàng)建ImageStorePipeline類(spiders/pipelines.py)
from scrapy.pipelines.images import ImagesPipeline
from scrapy.exceptions import DropItem
from scrapy.http import Request
class ImageStorePipeline(ImagesPipeline):
    """Downloads book cover images with browser-like headers.

    Douban's image CDN rejects requests carrying the default Scrapy
    user-agent, so each download request is sent with the headers below plus
    a per-image referer.
    """

    default_headers = {
        'accept': 'text/html,application/xhtml+xml,application/xml;q=0.9,image/webp,image/apng,*/*;q=0.8,application/signed-exchange;v=b3',
        'accept-encoding': 'gzip, deflate, br',
        'accept-language': 'zh-CN,zh;q=0.9',
        # Mandatory: without a real browser UA the CDN refuses the download.
        'user-agent': 'Mozilla/5.0 (Windows NT 6.1; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/76.0.3809.100 Safari/537.36',
    }

    def get_media_requests(self, item, info):
        """Yield one download Request per URL in item['img_url']."""
        for image_url in item['img_url']:
            # Build a per-request copy instead of mutating the shared
            # class-level dict: concurrently pending requests would otherwise
            # all observe whichever referer was written last.
            headers = dict(self.default_headers, referer=image_url)
            yield Request(image_url, headers=headers)

    def item_completed(self, results, item, info):
        """Attach the downloaded paths to the item; drop items with no image."""
        image_path = [x['path'] for ok, x in results if ok]
        if not image_path:
            raise DropItem("Item contains no images")
        item['image_path'] = image_path
        return item
八株旷、寫入數(shù)據(jù)庫
1、配置spiders/settings.py
#設(shè)置數(shù)據(jù)庫
MYSQL_HOST = ""
MYSQL_DBNAME = ""
MYSQL_USER = ""
MYSQL_PASSWORD = ""
ITEM_PIPELINES = {
'BookSpider.pipelines.ImageStorePipeline': 1,
'BookSpider.pipelines.MysqlTwistedPipeline': 30,
}
2、創(chuàng)建 MysqlTwistedPipeline 類(BookSpider/pipelines.py)
import MySQLdb.cursors
from twisted.enterprise import adbapi
class MysqlTwistedPipeline(object):
    """Writes items to MySQL asynchronously through a Twisted adbapi pool."""

    def __init__(self, dbpool):
        self.dbpool = dbpool

    @classmethod
    def from_settings(cls, settings):
        """Scrapy calls this before __init__; builds the shared connection
        pool from the MYSQL_* settings so self.dbpool is ready."""
        dbpool = adbapi.ConnectionPool(
            "MySQLdb",
            host=settings['MYSQL_HOST'],
            db=settings['MYSQL_DBNAME'],
            user=settings['MYSQL_USER'],
            passwd=settings['MYSQL_PASSWORD'],
            charset='utf8',
            cursorclass=MySQLdb.cursors.DictCursor,
            use_unicode=True,
        )
        return cls(dbpool)

    def process_item(self, item, spider):
        """Schedule the insert on the pool; errors are logged, not raised."""
        query = self.dbpool.runInteraction(self.do_insert, item)
        query.addErrback(self.handle_error, item, spider)
        # Bug fix: a pipeline must return the item so pipelines with a lower
        # priority still receive it; the original implicitly returned None.
        return item

    def do_insert(self, cursor, item):
        """Runs in a pool thread; executes the parameterized INSERT."""
        insert_sql = """
            insert into douban(name, author, publish, page_num, isbm, binding, publish_date, price, rate, img_url, image_path)
            values (%s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s)
        """
        cursor.execute(insert_sql, (
            item['name'], item['author'], item['publish'], item['page_num'],
            item['isbm'], item['binding'], item['publish_date'], item['price'],
            item['rate'],
            # img_url / image_path are lists on the item; join them into plain
            # strings instead of handing raw lists to the MySQL driver.
            ",".join(u for u in item['img_url'] if u),
            ",".join(item['image_path']),
        ))

    def handle_error(self, failure, item, spider):
        print(failure)
十、測試
1齿尽、執(zhí)行main.py文件