# -*- coding: utf-8 -*-
# Import the other modules and instantiate each module's class.
from baike_spider import url_manage, html_downloader, html_parse, html_outputer
class SpiderMain(object):
    # Initialize one instance of each component.
    def __init__(self):
        self.urls = url_manage.UrlManage()                  # URL manager
        self.downloader = html_downloader.HtmlDownLoader()  # downloader
        self.parse = html_parse.HtmlParse()                 # HTML parser
        self.outputer = html_outputer.HtmlOutputer()        # outputer
    def craw(self, root_url):  # crawl scheduler
        count = 1
        self.urls.add_new_url(root_url)  # seed the URL manager with the entry URL
        while self.urls.has_new_url():
            try:
                new_url = self.urls.get_new_url()  # take one pending URL
                print("craw page %d: %s" % (count, new_url))
                html_cont = self.downloader.download(new_url)
                new_urls, new_data = self.parse.parse(new_url, html_cont)
                self.urls.add_new_urls(new_urls)  # enqueue the URLs found on this page
                print(new_urls)
                self.outputer.collect_data(new_data)
                if count >= 10:  # stop after ten pages
                    break
                count += 1
            except Exception as e:
                print(str(e))  # report the error and continue with the next URL
        self.outputer.html_output()  # write the report once crawling is finished
if __name__ == "__main__":  # entry point
    root_url = "https://baike.baidu.com/item/Python"  # entry URL
    obj_spider = SpiderMain()  # create the scheduler
    obj_spider.craw(root_url)  # start the crawl
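The import at the top assumes these classes live in separate files inside a baike_spider package. A minimal layout that makes the import work (the file names are inferred from the module names; this layout is my assumption, not stated in the original):

baike_spider/
    __init__.py         # empty file; marks the directory as a package
    spider_main.py      # SpiderMain and the __main__ entry point above
    url_manage.py       # UrlManage (below)
    html_downloader.py  # HtmlDownLoader (below)
    html_parse.py       # HtmlParse (below)
    html_outputer.py    # HtmlOutputer (below)

With that layout in place, the spider can be started from the package's parent directory with: python -m baike_spider.spider_main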
class UrlManage(object):
    def __init__(self):  # initialize the two URL pools
        self.new_urls = set()  # URLs waiting to be crawled
        self.old_urls = set()  # URLs already crawled

    def add_new_url(self, url):  # add a single URL
        if url is None:
            return
        if url not in self.new_urls and url not in self.old_urls:
            self.new_urls.add(url)  # only accept URLs we have never seen

    def add_new_urls(self, urls):  # add a batch of URLs
        if urls is None or len(urls) == 0:  # nothing to add
            return
        for url in urls:
            self.add_new_url(url)

    def has_new_url(self):  # is there anything left to crawl?
        return len(self.new_urls) != 0

    def get_new_url(self):  # fetch one pending URL
        new_url = self.new_urls.pop()  # remove it from the pending set
        self.old_urls.add(new_url)  # remember it as already crawled
        return new_url
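A quick sanity check of the deduplication logic, runnable as-is against the class above (the URL is just a placeholder):

manager = UrlManage()
manager.add_new_url("https://baike.baidu.com/item/Python")
manager.add_new_url("https://baike.baidu.com/item/Python")  # duplicate, silently ignored
url = manager.get_new_url()   # moves the URL from new_urls to old_urls
manager.add_new_url(url)      # already in old_urls, so it is not re-queued
print(manager.has_new_url())  # False

Note that set.pop() returns an arbitrary element, so pages are not crawled in insertion order; using a collections.deque for new_urls would give FIFO order if that matters.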
import urllib.request

class HtmlDownLoader(object):
    def download(self, url):
        if url is None:
            return None
        response = urllib.request.urlopen(url)
        if response.getcode() != 200:  # treat anything but 200 OK as a failure
            return None
        return response.read()  # raw page bytes
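Baidu Baike is served over HTTPS and, like many sites, may reject requests carrying urllib's default User-Agent. A sketch of a more defensive variant (the header value and the 10-second timeout are my additions, not part of the original code):

import urllib.request

def download_with_headers(url):
    if url is None:
        return None
    # Send a browser-like User-Agent; the exact value is an assumption.
    req = urllib.request.Request(url, headers={"User-Agent": "Mozilla/5.0"})
    with urllib.request.urlopen(req, timeout=10) as response:
        if response.getcode() != 200:
            return None
        return response.read()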
import re
import urllib.parse  # in Python 3, urljoin lives in urllib.parse

from bs4 import BeautifulSoup

class HtmlParse(object):
    def _get_new_urls(self, page_url, soup):
        # entry links look like /item/xxx
        new_urls = set()
        links = soup.find_all("a", href=re.compile(r"/item/"))
        for link in links:
            new_url = link["href"]
            new_full_url = urllib.parse.urljoin(page_url, new_url)  # make the link absolute
            new_urls.add(new_full_url)
        return new_urls

    def _get_new_data(self, page_url, soup):
        res_data = {}
        res_data["url"] = page_url
        # title markup: <dd class="lemmaWgt-lemmaTitle-title"><h1>Python</h1></dd>
        title_node = soup.find("dd", class_="lemmaWgt-lemmaTitle-title").find("h1")
        res_data["title"] = title_node.get_text()
        # summary markup: <div class="lemma-summary">...</div>
        summary_node = soup.find("div", class_="lemma-summary")
        if summary_node is None:
            return None  # skip pages that have no summary block
        res_data["summary"] = summary_node.get_text()
        return res_data

    def parse(self, page_url, html_cont):
        if page_url is None or html_cont is None:
            return None, None  # always return a pair so the caller can unpack it
        soup = BeautifulSoup(html_cont, "html.parser", from_encoding="utf-8")
        new_urls = self._get_new_urls(page_url, soup)
        new_data = self._get_new_data(page_url, soup)
        return new_urls, new_data
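To see what parse returns without touching the network, you can feed it a handcrafted page. The snippet below simply mirrors the two selectors used above; it is illustrative, not real Baike markup:

sample_html = b"""
<html><body>
<dd class="lemmaWgt-lemmaTitle-title"><h1>Python</h1></dd>
<div class="lemma-summary">A general-purpose language.</div>
<a href="/item/Guido">Guido</a>
</body></html>
"""
parser = HtmlParse()
new_urls, new_data = parser.parse("https://baike.baidu.com/item/Python", sample_html)
print(new_urls)  # {'https://baike.baidu.com/item/Guido'}
print(new_data["title"], "-", new_data["summary"])  # Python - A general-purpose language.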
class HtmlOutputer(object):
    def __init__(self):
        self.datas = []  # collected records, one dict per page

    def collect_data(self, data):  # buffer one parsed record
        if data is None:
            return
        self.datas.append(data)

    def html_output(self):  # dump everything as an HTML table
        with open("output.html", "w", encoding="utf-8") as export:
            export.write("<html>")
            export.write("<body>")
            export.write("<table>")
            for data in self.datas:
                export.write("<tr>")
                export.write("<td>%s</td>" % data["url"])
                export.write("<td>%s</td>" % data["title"])
                export.write("<td>%s</td>" % data["summary"])
                export.write("</tr>")
            export.write("</table>")
            export.write("</body>")
            export.write("</html>")
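One practical detail: the file is written as UTF-8 but the HTML itself never declares an encoding, so a browser may guess wrong and garble the Chinese text. A minimal fix (my addition, not in the original code) is to emit a charset declaration before the body:

export.write("<html>")
export.write("<head><meta charset='utf-8'></head>")
export.write("<body>")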