# encoding: utf-8
import urllib2, requests  # library names must be spelled correctly; if unsure, check your Python install's package directory, e.g. C:\Python27\Lib\site-packages (bs4 lives at C:\Python27\Lib\site-packages\bs4)
from bs4 import BeautifulSoup
import os
# Requires: pip install requests beautifulsoup4 lxml (urllib2 ships with the Python 2 standard library). On Windows, `pip list` shows which packages are currently installed.
def download(url):  # downloader without browser masquerading
    print("Downloading: %s" % url)
    try:
        result = urllib2.urlopen(url, timeout=2).read()
    except urllib2.URLError as e:
        print("Downloading Error:", e.reason)
        result = None
    return result
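# Example use (hypothetical URL): download('http://example.com') returns the raw
# page bytes, or None if the request fails or times out after 2 seconds.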
def download_browser(url, headers):  # downloader with browser masquerading
    opener = urllib2.build_opener()  # build an opener that can masquerade as a browser
    opener.addheaders = headers  # set the fake browser headers
    print("Downloading: %s" % url)
    try:
        result = opener.open(url, timeout=2)
        result = result.read()
        print("Download OK!")
    except urllib2.URLError as e:
        print("Downloading error:", e.reason)
        result = None
    return result
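# The script imports requests above but never uses it; a minimal alternative
# downloader built on requests would look like this (a sketch; download_requests
# is a hypothetical helper, not called anywhere in the original flow).
# Note that requests takes headers as a dict rather than a list of tuples:
def download_requests(url, headers):
    try:
        resp = requests.get(url, headers=dict(headers), timeout=2)
        return resp.content  # raw page bytes, like the urllib2 versions above
    except requests.RequestException as e:
        print("Downloading error:", e)
        return None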
# Parse the index page and collect the URLs
def bs_parser(html):
    tree = BeautifulSoup(html, 'lxml')
    # Parsed with the lxml parser, so lxml must be installed; lxml is among the
    # most feature-rich and easiest-to-use Python libraries for XML and HTML work.
    data = tree.find('div', class_='x-sidebar-left-content').find_all('a')
    # This selector matches the target site's markup; inspect the page you are
    # scraping to find the right structure for your own case.
    print(data[0].attrs['href'])
    urls = []
    titles = []
    for item in data:
        urls.append(item.attrs['href'])
        titles.append(item.get_text())
    return urls, titles
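# The same find/find_all pattern on made-up markup, for illustration (assumed
# HTML, not the real page):
#   soup = BeautifulSoup('<div class="x-sidebar-left-content"><a href="/demo">Demo</a></div>', 'lxml')
#   links = soup.find('div', class_='x-sidebar-left-content').find_all('a')
#   links[0].attrs['href']  -> '/demo'
#   links[0].get_text()     -> 'Demo'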
# Parse the content of an article page
def bs_parser_content(html):
    tree = BeautifulSoup(html, 'lxml')
    data = tree.find('div', class_='x-wiki-content')
    # print(data)
    result = data.get_text()
    return result
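# get_text() drops the tags and keeps only the text, e.g. (assumed markup):
#   BeautifulSoup('<div class="x-wiki-content"><p>hello</p></div>', 'lxml')
#       .find('div', class_='x-wiki-content').get_text()  -> 'hello'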
# Index page URL
url = 'http://www.liaoxuefeng.com/wiki/0014316089557264a6b348958f449949df42a6d3a2e542c000'
root = 'http://www.liaoxuefeng.com'
# headers must be a list of (name, value) tuples, which is what opener.addheaders expects
headers = [
    ('Connection', 'Keep-Alive'),
    ('Accept', 'text/html, application/xhtml+xml, */*'),
    ('Accept-Language', 'en-US,en;q=0.8,zh-Hans-CN;q=0.5,zh-Hans;q=0.3'),
    ('User-Agent', 'Mozilla/5.0 (Windows NT 6.3; WOW64; Trident/7.0; rv:11.0) like Gecko')
]
html = download_browser(url, headers)  # download the index page's HTML
urls, titles = bs_parser(html)  # parse the index HTML, returning the URLs and titles
if not os.path.exists('Results'):  # make sure the output directory exists
    os.makedirs('Results')
i = 0
for item, title in zip(urls, titles):
    if i == 5:  # only fetch the first five pages
        break
    i += 1
    url = root + item
    html = download_browser(url, headers)  # download the page's HTML
    result = bs_parser_content(html)  # parse the HTML and extract the text
    # Build the output text file's path
    fileName = str(i) + '_' + title.replace(r'/', ' ') + '.txt'
    fileName = os.path.join('Results/', fileName)
    print("fileName path is %s:" % fileName)
    # Write the extracted text to the file
    with open(fileName, 'w') as f:
        f.write(result.encode('utf-8').strip())
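# Note for Python 3: urllib2 was split into urllib.request and urllib.error.
# A minimal Python 3 equivalent of download() would be (a sketch, stdlib only):
#   from urllib.request import urlopen
#   from urllib.error import URLError
#   def download_py3(url):
#       try:
#           return urlopen(url, timeout=2).read()
#       except URLError as e:
#           print("Downloading Error:", e.reason)
#           return None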