1 Basic version
1.1 Grab the front-page links and visit each one
This is not very different from the previous two hands-on posts, so this time I tried a modular style instead of the script-style code I wrote before. The post was written after the code was finished, so I will only briefly note the problems I ran into while coding and how I solved them.
- The data returned by the requests call contained garbled Chinese text.
After a closer look at the page, the source of the 考研幫 page is:
<html lang="zh">
<head>
...
My guess was that this was still an encoding problem, so the response from the requests call gets an explicit encoding before it is returned:
# fix for garbled Chinese in the response
resp.encoding = 'utf-8'
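If you would rather not hard-code the charset, requests can also guess it from the response body. A minimal sketch (using the same start page as above; apparent_encoding is requests' own detection, so treat this as an alternative rather than the method used in this post):
import requests

resp = requests.get('http://www.kaoyan.com/daoshi/')
# apparent_encoding is the encoding requests detects from the raw bytes;
# using it avoids hard-coding 'utf-8' when the HTTP headers declare no charset
resp.encoding = resp.apparent_encoding
print(resp.encoding)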
- The extracted data contained a lot of whitespace/control characters such as '\r', '\t', '\n', '\u3000' and '\xa0'. The fix is to run a .replace() pass on the text after the request, before the data is returned:
resp = resp.text.replace('\t', '') \
    .replace('\r', '') \
    .replace('\n', '') \
    .replace('\u3000', '')
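The same cleanup can also be done in one pass with a regular expression; this is only an equivalent sketch, not the code used in the final version:
import re

def clean_text(raw):
    # strip tabs, carriage returns, newlines, the ideographic space (\u3000)
    # and the non-breaking space (\xa0) in a single substitution
    return re.sub(r'[\t\r\n\u3000\xa0]', '', raw)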
Final code for the basic front-page version:
import time
import requests
from lxml import etree
start_url = 'http://www.kaoyan.com/daoshi/'
titles = []
context = []
data = {}
# fetch a url
def get_resp(url):
ua = 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) ' \
'AppleWebKit/537.36 (KHTML, like Gecko) ' \
'Chrome/80.0.3987.116 Safari/537.36'
header = {'User-Agent': ua}
resp = requests.get(url, headers=header)
# fix for garbled Chinese in the response
resp.encoding = 'utf-8'
if resp.status_code == 200:
# return resp.text
resp = resp.text.replace('\t', '').replace('\r', '').replace('\n', '').replace('\u3000', '')
return resp
else:
print(url + '訪問失敗')
def parse_teacher(resp_page):
et = etree.HTML(resp_page)
if et is not None:
# extract the title
selectors = et.xpath('//h1')
titles.append(selectors[0].text)
# extract the introduction text
selectors2 = et.xpath("//div[@class='articleCon']//p/text()")
text = ''
for s in selectors2:
text += s
context.append(text)
else:
print('發(fā)現(xiàn)一個(gè)異常頁面,已跳過')
# parse the links and visit each one in turn
def link_parse(resp):
et = etree.HTML(resp)
links = et.xpath("//ul[@class='list areaZslist']/li//a/@href")
for link in links:
resp_page = get_resp(link)
parse_teacher(resp_page)
def make_and_print():
data.update(zip(titles, context))
print(data)
if __name__ == '__main__':
start_time = time.time()
# fetch the start url
resp = get_resp(start_url)
# parse the links and download each page
link_parse(resp)
# assemble the final data and print it
make_and_print()
last_time = time.time() - start_time
print(last_time)
The output looks like this:
V1 output
1.2 Getting the next page
Scraping only one page of this data would of course be pointless. The next step is to check whether the page has a next-page link and, if it does, keep loading the following pages. The core code (a more robust alternative is sketched after this block):
# handle pagination
next_url = et.xpath('//div[4]/a[11]/@href')
if next_url:
print('下一頁地址:', next_url[0])
r = get_resp(next_url[0])
link_parse(r)
else:
print('頁面加載完畢,開始逐個(gè)下載導(dǎo)師資料,請稍后...')
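Note that //div[4]/a[11] selects the link purely by position, so it breaks as soon as the layout changes. A more robust sketch would match the link by its text instead, using the same et element tree as above; the '下一頁' label is an assumption about the page markup, not something verified here:
# hypothetical alternative: pick the pagination link by its label instead of its position
next_url = et.xpath("//a[contains(text(), '下一頁')]/@href")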
1.3 Saving to text files
Each supervisor's information is saved to a text file, with the title as the file name.
def save_data(path, dicta):
if not os.path.exists(path):
os.mkdir(os.getcwd() + '\\data_out')
os.chdir(path)
for k, v in dicta.items():
filename = k+'.txt'
file_context = v
f = open(filename, 'w+', encoding='utf-8')
f.write(file_context)
f.seek(0)
f.close()
print(k, '資料保存完成绍昂!')
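As a variant, the same thing can be written without os.chdir (which silently changes every later relative path) and with a with block so files are always closed. This is only a sketch of an alternative, not the code behind the screenshots below:
import os

def save_data(path, dicta):
    # create the output directory if needed, without changing the working directory
    os.makedirs(path, exist_ok=True)
    for k, v in dicta.items():
        # build the full file path instead of relying on os.chdir
        filename = os.path.join(path, k + '.txt')
        with open(filename, 'w', encoding='utf-8') as f:
            f.write(v)
        print(k, '資料保存完成!')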
The result:
Final result 1
Final result 2
Final result 3
That covers the requirement nicely, and wraps up part one. The complete code for part one:
import time
import os
import requests
from lxml import etree
start_url = 'http://www.kaoyan.com/daoshi/'
titles = []
context = []
data = {}
# fetch a url
def get_resp(url):
ua = 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) ' \
'AppleWebKit/537.36 (KHTML, like Gecko) ' \
'Chrome/80.0.3987.116 Safari/537.36'
header = {'User-Agent': ua}
resp = requests.get(url, headers=header)
# fix for garbled Chinese in the response
resp.encoding = 'utf-8'
if resp.status_code == 200:
# return resp.text
resp = resp.text.replace('\t', '') \
.replace('\r', '') \
.replace('\n', '') \
.replace('\u3000', '') \
.replace('\xa0', '')
return resp
else:
print(url + '訪問失敗')
def parse_teacher(resp_page):
et = etree.HTML(resp_page)
if et is not None:
# extract the title
selectors = et.xpath('//h1')
titles.append(selectors[0].text)
# extract the introduction text
selectors2 = et.xpath("//div[@class='articleCon']//p/text()")
text = ''
for s in selectors2:
text += s
context.append(text)
else:
print('發(fā)現(xiàn)一個(gè)異常頁面,已跳過')
# parse the links and visit each one in turn
def link_parse(resp):
et = etree.HTML(resp)
links = et.xpath("//ul[@class='list areaZslist']/li//a/@href")
# handle pagination
next_url = et.xpath('//div[4]/a[11]/@href')
if next_url:
print('下一頁地址:', next_url[0])
r = get_resp(next_url[0])
link_parse(r)
else:
print('頁面加載完畢,開始逐個(gè)下載導(dǎo)師資料,請稍后...')
for link in links:
resp_page = get_resp(link)
parse_teacher(resp_page)
def make_and_print():
data.update(zip(titles, context))
print(data)
def save_data(path, dicta):
if not os.path.exists(path):
os.mkdir(os.getcwd() + '\\data_out')
os.chdir(path)
for k, v in dicta.items():
filename = k+'.txt'
file_context = v
f = open(filename, 'w+', encoding='utf-8')
f.write(file_context)
f.seek(0)
f.close()
print(k, '資料保存完成泪蔫!')
if __name__ == '__main__':
start_time = time.time()
# save_data('./data_out', data)
# fetch the start url
resp = get_resp(start_url)
# parse the links and download each page
link_parse(resp)
# assemble the final data and print it
make_and_print()
save_data('./data_out', data)
print(len(titles))
last_time = time.time() - start_time
print(last_time)
2 Multi-threaded version
The data set is small and the single-threaded run does not take long anyway, but I still felt I had not really understood the multi-threading from the earlier posts, so let's write another demo.
First import the threading module from the standard library, plus Queue from the queue module to hold the queue of links.
import threading
from queue import Queue
Define a variable that controls the number of threads, a list to hold the threads (a simple thread pool), and a queue for the links:
thread_num = 10
threads = []
links_queue = Queue()
In the link-handling function, put every link into the queue:
for link in links:
links_queue.put(link)
Create the threads in the main block:
for t in range(thread_num):
t = threading.Thread(target=download)
t.start()
threads.append(t)
Write the download() function:
def download():
while True:
link = links_queue.get()
if link is None:
break
resp_page = get_resp(link)
parse_teacher(resp_page)
print('當(dāng)前下載線程數(shù):%s,剩余%s條鏈接未解析' %
(len(threading.enumerate())-1, links_queue.qsize()))
Finally, shut the threads down by feeding one None sentinel per thread into the queue and joining them:
for i in range(thread_num):
links_queue.put(None)
for t in threads:
t.join()
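For comparison, the whole start/sentinel/join dance can also be delegated to concurrent.futures.ThreadPoolExecutor. A minimal sketch, assuming the get_resp and parse_teacher functions defined above and a plain list of links instead of the queue:
from concurrent.futures import ThreadPoolExecutor

def crawl_all(links, thread_num=10):
    # the executor creates, schedules and joins the worker threads for us
    with ThreadPoolExecutor(max_workers=thread_num) as pool:
        # pages are downloaded concurrently; parsing stays in the main thread
        for resp_page in pool.map(get_resp, links):
            parse_teacher(resp_page)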
The run is noticeably faster:
Result 1
Result 2
Result 3
Compared with the single-threaded version it is more than twice as fast. (The work is I/O-bound, so the threads spend most of their time waiting on the network and the GIL is not a bottleneck.)
The complete multi-threaded code:
# multi-threaded crawler for supervisor information
import time
import os
import threading
from queue import Queue
import requests
from lxml import etree
start_url = 'http://www.kaoyan.com/daoshi/'
titles = []
context = []
data = {}
thread_num = 10
threads = []
links_queue = Queue()
# fetch a url
def get_resp(url):
ua = 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) ' \
'AppleWebKit/537.36 (KHTML, like Gecko) ' \
'Chrome/80.0.3987.116 Safari/537.36'
header = {'User-Agent': ua}
resp = requests.get(url, headers=header)
# fix for garbled Chinese in the response
resp.encoding = 'utf-8'
if resp.status_code == 200:
# return resp.text
resp = resp.text.replace('\t', '') \
.replace('\r', '') \
.replace('\n', '') \
.replace('\u3000', '') \
.replace('\xa0', '')
return resp
else:
print(url + '訪問失敗')
def parse_teacher(resp_page):
et = etree.HTML(resp_page)
if et is not None:
# extract the title
selectors = et.xpath('//h1')
titles.append(selectors[0].text)
# extract the introduction text
selectors2 = et.xpath("//div[@class='articleCon']//p/text()")
text = ''
for s in selectors2:
text += s
context.append(text)
else:
print('發(fā)現(xiàn)一個(gè)異常頁面,已跳過')
# parse the links and put each one into the queue
def link_parse(resp):
et = etree.HTML(resp)
links = et.xpath("//ul[@class='list areaZslist']/li//a/@href")
# handle pagination
next_url = et.xpath('//div[4]/a[11]/@href')
if next_url:
print('下一頁地址:', next_url[0])
r = get_resp(next_url[0])
link_parse(r)
else:
print('頁面加載完畢,開始逐個(gè)下載導(dǎo)師資料,請稍后...')
for link in links:
links_queue.put(link)
# resp_page = get_resp(link)
# parse_teacher(resp_page)
def make_and_print():
data.update(zip(titles, context))
print(data)
def save_data(path, dicta):
if not os.path.exists(path):
os.mkdir(os.getcwd() + '\\data_out')
os.chdir(path)
for k, v in dicta.items():
filename = k+'.txt'
file_context = v
f = open(filename, 'w+', encoding='utf-8')
f.write(file_context)
f.seek(0)
f.close()
print(k, '資料保存完成!')
def download():
while True:
link = links_queue.get()
if link is None:
break
resp_page = get_resp(link)
parse_teacher(resp_page)
print('當(dāng)前下載線程數(shù):%s,剩余%s條鏈接未解析' %
(len(threading.enumerate())-1, links_queue.qsize()))
if __name__ == '__main__':
start_time = time.time()
# fetch the start url
resp = get_resp(start_url)
# parse the links and fill the download queue
link_parse(resp)
for t in range(thread_num):
t = threading.Thread(target=download)
t.start()
threads.append(t)
for i in range(thread_num):
links_queue.put(None)
for t in threads:
t.join()
# assemble the final data and print it
make_and_print()
save_data('./data_out', data)
last_time = time.time() - start_time
print('共下載%s條導(dǎo)師信息,耗時(shí)%s秒' % (len(titles), last_time))