爬蟲實(shí)戰(zhàn)第四天
任務(wù)
爬取58同城北京地區(qū)手機(jī)號(hào)頁(yè)面 http://bj.58.com/shoujihao/
成果
爬取3000余條手機(jī)號(hào)的交易信息,并保存到本地?cái)?shù)據(jù)庫(kù)
源碼
import requests
import time
from pymongo import MongoClient
from bs4 import BeautifulSoup
from multiprocessing import Pool
from threading import Thread
# Connect to the local MongoDB instance, create (lazily) the `tong_cheng`
# database, and open two collections: one for listing titles/links found on
# the index pages, one for the scraped detail records.
client = MongoClient('localhost', 27017)
tong_cheng = client['tong_cheng']
shoujihao_links = tong_cheng['shoujihao_links']
shoujihao_info = tong_cheng['shoujihao_info']
# Spider 1: collect every listing's title and detail-page link from one
# index page and store them in the `shoujihao_links` collection.
def get_links(page):
    response = requests.get(page)
    soup = BeautifulSoup(response.text, 'lxml')
    title_tags = soup.select('#infolist > div > ul > div > ul > li > a.t > strong')
    link_tags = soup.select('#infolist > div > ul > div > ul > li > a.t')
    # Titles and anchors come from the same <li> elements, so pair them up.
    for title_tag, link_tag in zip(title_tags, link_tags):
        record = {'標(biāo)題': title_tag.get_text(), '鏈接': link_tag['href']}
        shoujihao_links.insert_one(record)
# Spider 2: scrape the title and price from a single detail page and store
# the record in the `shoujihao_info` collection.
def get_info(link):
    soup = BeautifulSoup(requests.get(link).text, 'lxml')

    def _clean(text):
        # Strip newlines, tabs and spaces left over from the page markup.
        return text.replace('\n', '').replace('\t', '').replace(' ', '')

    title_tag = soup.select('#main > div.col.detailPrimary.mb15 > div.col_sub.mainTitle > h1')[0]
    price_tag = soup.select('#main > div.col.detailPrimary.mb15 > div.col_sub.sumary > ul > li > div.su_con > span')[0]
    shoujihao_info.insert_one({
        '鏈接': link,
        '標(biāo)題': _clean(title_tag.get_text()),
        '價(jià)格': _clean(price_tag.get_text()),
    })
# Progress reporter: print how many documents each collection holds.
def count():
    """Print the number of saved links and saved items every 5 seconds.

    Runs forever; intended to be started on a daemon thread so it dies
    with the main process.
    """
    while True:
        # count_documents({}) replaces the deprecated Cursor.count()
        # (.find().count() was removed in pymongo 4.x).
        print('Saved links:%s\nSaved items:%s\n---------------'
              % (shoujihao_links.count_documents({}),
                 shoujihao_info.count_documents({})))
        time.sleep(5)
# Drive the crawl: a daemon thread reports progress while a process pool scrapes.
def main(pages):
    """Crawl every index page in *pages*, then every detail link saved so far.

    Step 1 saves title + link for each listing page; step 2 re-reads the
    saved links from MongoDB and fetches each detail page.
    """
    # Daemon thread so the counter dies automatically with the main process.
    reporter = Thread(target=count)
    reporter.daemon = True
    reporter.start()
    # Use the pool as a context manager so worker processes are always
    # cleaned up (the original never called close()/join() and leaked them).
    with Pool() as pool:
        # Step 1: save the title and link of every listing.
        pool.map(get_links, pages)
        # Step 2: fetch details for every link stored in the database.
        pool.map(get_info, [link['鏈接'] for link in shoujihao_links.find()])
# Index pages 1-110 of the Beijing phone-number listings.
# str.format() stringifies its argument itself, so the redundant str(i)
# wrapper from the original is dropped.
pages = ['http://bj.58.com/shoujihao/pn{}/'.format(i) for i in range(1, 111)]
if __name__ == '__main__':
    main(pages)
小結(jié)
- 多進(jìn)程可顯著提高爬蟲效率,多了解多進(jìn)程、多線程相關(guān)知識(shí)。