2. Introduction to crawlers and the technical value of crawlers
2-1: What is a crawler?
2-2: What is the value of crawler technology?
3. Simple crawler architecture
3-1: The simple crawler architecture
3-2: The dynamic running flow of the simple crawler architecture
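As a preview (the full scheduler appears in section 7-2), the running flow can be sketched as a loop like the one below; the urls, downloader, parser and outputer objects stand in for the components built in the practice project:
# A minimal sketch of the dynamic flow; the real scheduler is in section 7-2
urls.add_new_url(root_url)                             # seed the URL manager
while urls.has_new_url():                              # as long as there are URLs left to crawl
    url = urls.get_new_url()                           # 1. the scheduler asks the URL manager for a URL
    html_cont = downloader.download(url)               # 2. the downloader fetches the page
    new_urls, new_data = parser.parse(url, html_cont)  # 3. the parser extracts new URLs and data
    urls.add_new_urls(new_urls)                        # 4. new URLs go back into the URL manager
    outputer.collect_data(new_data)                    # 5. the data is handed to the outputer
outputer.output_html()                                 # finally, output the collected data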
4. The URL manager and how to implement it
4-1: The URL manager
4-2: Ways to implement the URL manager
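A minimal sketch of the simplest implementation, keeping the URLs in two in-memory Python sets (one for URLs still waiting to be crawled, one for URLs already crawled); the class name here is only for illustration, and the full version used in the practice project appears in section 7-3 below.
class InMemoryUrlManager(object):
    def __init__(self):
        self.new_urls = set()   # URLs waiting to be crawled
        self.old_urls = set()   # URLs that have already been crawled
    def add_new_url(self, url):
        # accept only URLs that have never been seen before
        if url and url not in self.new_urls and url not in self.old_urls:
            self.new_urls.add(url)
    def get_new_url(self):
        # take one pending URL and remember it as crawled
        url = self.new_urls.pop()
        self.old_urls.add(url)
        return url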
5. The web page downloader and the urllib2 module
5-1: Introduction to web page downloaders
5-2: Three ways to download a page with urllib2
1. urlopen(): open the URL directly
import urllib2
# Make the request directly
response = urllib2.urlopen('http://www.baidu.com')
# Get the status code; 200 means the download succeeded
print response.getcode()
# Get the content
cont = response.read()
2. Adding data and an HTTP header
import urllib2
import urllib
url = 'http://www.baidu.com'
# Create a Request object
request = urllib2.Request(url)
# Add form data (add_data expects a single url-encoded string)
request.add_data(urllib.urlencode({'a': '1'}))
# Add an HTTP header so the request looks like a browser visit
request.add_header('User-Agent', 'Mozilla/5.0')
# Send the request and get the response
response = urllib2.urlopen(request)
3. Adding a handler for special scenarios
import urllib2, cookielib
# Create a cookie container
cj = cookielib.CookieJar()
# Create an opener that carries the cookie jar
opener = urllib2.build_opener(urllib2.HTTPCookieProcessor(cj))
# Install the opener into urllib2
urllib2.install_opener(opener)
# Visit the page with the cookie-enabled urllib2
response = urllib2.urlopen("http://www.xxx.com")
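HTTPCookieProcessor is only one of the special-scenario handlers urllib2 offers; a page that has to be fetched through a proxy can use ProxyHandler with the same install-an-opener pattern. A minimal sketch, assuming a purely hypothetical proxy address:
import urllib2
# Hypothetical proxy address, used only to illustrate the handler pattern
proxy_handler = urllib2.ProxyHandler({'http': 'http://127.0.0.1:8087'})
opener = urllib2.build_opener(proxy_handler)
urllib2.install_opener(opener)
response = urllib2.urlopen('http://www.baidu.com')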
6. **The web page parser and the BeautifulSoup third-party module**
6-1: Introduction to web page parsers
6-2: The BeautifulSoup module: introduction and installation
BeautifulSoup is used to extract data from HTML or XML documents.
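BeautifulSoup 4 is distributed as the beautifulsoup4 package and is typically installed with pip (assuming pip is available):
pip install beautifulsoup4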
6-3: BeautifulSoup syntax
Create a BeautifulSoup object:
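A minimal sketch of the three steps the syntax covers (create the soup object, search for nodes with find_all/find, access a node's information); the one-line html_doc is only a stand-in for a real page:
import re
from bs4 import BeautifulSoup
html_doc = '<html><body><a href="/view/123.htm" class="sister">Elsie</a></body></html>'
# 1. Create the BeautifulSoup object from an HTML string
soup = BeautifulSoup(html_doc, 'html.parser', from_encoding='utf-8')
# 2. Search for nodes: by tag name, by attribute value, or by regular expression
links = soup.find_all('a')                                  # every <a> node
link = soup.find('a', class_="sister")                      # first <a> whose class is "sister"
link = soup.find('a', href=re.compile(r"/view/\d+\.htm"))   # first <a> whose href matches the regex
# 3. Access the matched node's information
print link.name        # tag name: 'a'
print link['href']     # attribute value: '/view/123.htm'
print link.get_text()  # node text: 'Elsie'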
6-4: A BeautifulSoup test example
import re
from bs4 import BeautifulSoup
html_doc = """
<html><head><title>The Dormouse's story</title></head>
<body>
<p class="title"><b>The Dormouse's story</b></p>
<p class="story">Once upon a time there were three little sisters; and their names were
<a href="http://example.com/elsie" class="sister" id="link1">Elsie</a>,
<a href="http://example.com/lacie" class="sister" id="link2">Lacie</a> and
<a href="http://example.com/tillie" class="sister" id="link3">Tillie</a>;
and they lived at the bottom of a well.</p>
<p class="story">...</p>
"""
soup = BeautifulSoup(html_doc, 'html.parser', from_encoding='utf-8')
print 'Get all the links'
links = soup.find_all('a')  # find every node whose tag is a
for link in links:
    print link.name, link['href'], link.get_text()
print "Get Lacie's link"
link_node = soup.find('a', href='http://example.com/lacie')  # find the a node whose href is exactly this link
print link_node.name, link_node['href'], link_node.get_text()
print 'Regular-expression match'
link_node = soup.find('a', href=re.compile(r"ill"))  # find the a node whose href matches the regex
print link_node.name, link_node['href'], link_node.get_text()
print 'Get the text of the p paragraph'
p_node = soup.find('p', class_="title")  # find the p node whose class is title
print p_node.name, p_node.get_text()
7. **Hands-on practice: crawling data from 1,000 Baidu Baike pages**
7-1: Crawler example - analyzing the target
Inspect the page elements in the browser to determine the URL pattern of entry pages, the tags that hold the title and the summary, and the page encoding.
7-2: The scheduler: spider_main.py
#!/usr/bin/python
# -*- coding: utf-8 -*-
from baike_spider import url_manager
from baike_spider import html_downloader
from baike_spider import html_parser
from baike_spider import html_outputer
class SpiderMain(object):
    def __init__(self):
        self.urls = url_manager.UrlManager()
        self.downloader = html_downloader.HtmlDownloader()
        self.parser = html_parser.HtmlParser()
        self.outputer = html_outputer.HtmlOutputer()
    def craw(self, root_url):  # the crawler's scheduler
        count = 1  # index of the URL currently being crawled
        self.urls.add_new_url(root_url)
        try:
            while self.urls.has_new_url():  # as long as there are new URLs
                new_url = self.urls.get_new_url()  # fetch a new URL
                print 'craw %d : %s' % (count, new_url)  # print which URL this is
                html_cont = self.downloader.download(new_url)  # download the page
                new_urls, new_data = self.parser.parse(new_url, html_cont)  # parse the page, passing in the current URL and the downloaded content
                self.urls.add_new_urls(new_urls)  # add the parsed URLs to the URL manager
                self.outputer.collect_data(new_data)  # collect the parsed data
                if count == 1000:
                    break
                count = count + 1
        except:
            print 'craw failed'  # exception handling
        self.outputer.output_html()  # output the collected data
if __name__ == "__main__":
    root_url = "http://baike.baidu.com/view/21087.htm"
    obj_spider = SpiderMain()
    obj_spider.craw(root_url)
7-3: The URL manager: url_manager.py
#!/usr/bin/python
# -*- coding: utf-8 -*-
class UrlManager(object):  # maintains two collections: 1. URLs waiting to be crawled  2. URLs already crawled
    def __init__(self):
        self.new_urls = set()
        self.old_urls = set()
    def add_new_url(self, url):  # add a single new URL to the manager
        if url is None:
            return
        if url not in self.new_urls and url not in self.old_urls:
            self.new_urls.add(url)
    def add_new_urls(self, urls):  # add URLs in batch
        if urls is None or len(urls) == 0:  # urls is None or empty
            return
        for url in urls:
            self.add_new_url(url)  # add them one by one
    def has_new_url(self):  # whether the manager still holds URLs waiting to be crawled
        return len(self.new_urls) != 0
    def get_new_url(self):  # fetch one URL waiting to be crawled from the manager
        new_url = self.new_urls.pop()  # pop takes an element out and removes it
        self.old_urls.add(new_url)
        return new_url
7-4: The HTML downloader: html_downloader.py
#!/usr/bin/python
# -*- coding: utf-8 -*-
import urllib2
class HtmlDownloader(object):  # exposes a single download method
    def download(self, url):
        if url is None:
            return None
        response = urllib2.urlopen(url)
        if response.getcode() != 200:  # the download failed
            return None
        return response.read()
7-5: The HTML parser: html_parser.py
#!/usr/bin/python
# -*- coding: utf-8 -*-
import re
import urlparse
from bs4 import BeautifulSoup
class HtmlParser(object):  # takes a URL and its page content, returns new URLs and the extracted data
    def parse(self, page_url, html_cont):
        if page_url is None or html_cont is None:
            return
        soup = BeautifulSoup(html_cont, 'html.parser', from_encoding='utf-8')
        new_urls = self._get_new_urls(page_url, soup)
        new_data = self._get_new_data(page_url, soup)
        return new_urls, new_data
    def _get_new_urls(self, page_url, soup):  # collect the URLs of other entries
        new_urls = set()
        # the regex needs to match links like /view/123.htm
        links = soup.find_all('a', href=re.compile(r"/view/\d+\.htm"))  # find every a node whose href matches
        for link in links:
            new_url = link['href']  # the new (relative) link
            new_full_url = urlparse.urljoin(page_url, new_url)  # join it into a full URL
            new_urls.add(new_full_url)
        return new_urls
    def _get_new_data(self, page_url, soup):  # extract the data
        res_data = {}
        # url
        res_data['url'] = page_url
        # <dd class="lemmaWgt-lemmaTitle-title"> <h1>Python</h1>
        title_node = soup.find('dd', class_="lemmaWgt-lemmaTitle-title").find("h1")
        res_data['title'] = title_node.get_text()
        # <div class="lemma-summary" label-module="lemmaSummary">
        summary_node = soup.find('div', class_="lemma-summary")
        res_data['summary'] = summary_node.get_text()
        return res_data
7-6: The HTML outputer: html_outputer.py
#!/usr/bin/python
# -*- coding: utf-8 -*-
class HtmlOutputer(object):
    def __init__(self):
        self.datas = []  # the collected records
    def collect_data(self, data):
        if data is None:
            return
        self.datas.append(data)
    def output_html(self):
        fout = open('output.html', 'w')
        fout.write("<html>")
        fout.write("<head>")
        fout.write('<meta charset="utf-8">')
        fout.write("</head>")
        fout.write("<body>")
        fout.write("<table>")
        # Python 2's default encoding is ASCII, so encode the strings as UTF-8 explicitly
        for data in self.datas:
            fout.write("<tr>")
            fout.write("<td>%s</td>" % data['url'])
            fout.write("<td>%s</td>" % data['title'].encode('utf-8'))
            fout.write("<td>%s</td>" % data['summary'].encode('utf-8'))
            fout.write("</tr>")
        fout.write("</table>")
        fout.write("</body>")
        fout.write("</html>")
        fout.close()
7-7: Running the crawler and viewing the result: output.html
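A typical way to launch it, assuming the five modules above are saved inside a package directory named baike_spider (with an empty __init__.py) and the command is run from the package's parent directory; the crawler then prints one "craw N : url" line per page and finally writes output.html, which can be opened in a browser:
python -m baike_spider.spider_main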
8. Course summary
PS: teaching with diagrams really works well ~~