# 抓取基本解釋信息 及其他字典地址
# --------------------------------------
# 抓取字典詳細信息,保存
# 保存字典其他信息網(wǎng)址
# --------------------------------------
import string
import urllib.request
import re
from bs4 import BeautifulSoup
import codecs
# 頁面地址
# 開網(wǎng)頁地址文件并抓取
def scrapt(zurl):
    """Scrape one zdic.net definition page at *zurl*.

    Side effects (module-level globals, opened by the script body):
      - writes every related-dictionary URL found in the page's
        'tab-row' blocks to ``zurllistfile``;
      - writes each 'tab-page' definition block, tagged with the
        decoded character in a ``key`` attribute, to ``hdjbjs``.
    """
    print(zurl)
    req = urllib.request.Request(zurl)
    # The site appears to expect a Referer / User-Agent header.
    req.add_header('Referer', 'http://www.zdic.net/z/jbs/')
    req.add_header('User-Agent', 'None')
    responseb = urllib.request.urlopen(req)
    try:
        # Decode the page body once at the I/O boundary.
        index_z = responseb.read().decode('utf8')
    finally:
        # fix: the HTTP response was never closed (resource leak)
        responseb.close()
    # fix: name the parser explicitly -- the bare BeautifulSoup(markup)
    # call emits a warning and picks whichever parser is installed.
    soup = BeautifulSoup(index_z, 'html.parser')
    # Collect addresses of this entry in other dictionaries.
    tab_raw = soup.find_all(attrs={'class': 'tab-row'})
    for itemtab in tab_raw:
        zurllist = re.findall(r'/./.*?htm', str(itemtab))
        for line in zurllist:
            zurllistfile.write('http://www.zdic.net' + line + '\n')
    # Definition content blocks to be saved below.
    tab_page = soup.find_all(attrs={'class': 'tab-page'})
    # The URL's last path component (before the extension) is the hex
    # code point of the character, e.g. .../4e00.htm -> '4e00'.
    keyq = re.split(r'[/.]', zurl)[-2]
    print(keyq)
    if len(keyq) > 4:
        # Longer codes carry a leading marker character; strip it
        # before decoding.  TODO(review): confirm against zdic URLs.
        keyq = keyq[1:]
        print(keyq)
    # Build a b'\uXXXX' escape sequence and decode it to the character.
    key = (b'\u' + keyq.encode()).decode('unicode-escape')
    print(key)
    # Save the definition blocks, tagging each with the character.
    # (fix: removed dead no-op statement `type(str(tab_page_item))`.)
    for tab_page_item in tab_page:
        tab_page_item['key'] = key
        hdjbjs.write(str(tab_page_item) + '\n')
# Output files are module-level globals because scrapt() writes to them.
# hdjbjs holds the scraped definitions wrapped in a root <xml> element;
# zurllistfile holds the related-dictionary URL list.
hdjbjs = codecs.open("hdjibenjieshi_file", "w", 'utf-8')
hdjbjs.write("<xml name='漢典基本解釋'>")
zurllistfile = codecs.open("otherzurllist", "w", 'utf-8')
# Read the list of definition-page URLs and scrape each non-empty one.
# fix: use a context manager -- the URL file was opened but never closed.
with open('zdurlfile_jibenjieshizdurl', 'r') as jsurlfile:
    for zurl in jsurlfile.read().split('\n'):
        if len(zurl) != 0:
            scrapt(zurl)
# Close the output files before exiting.
zurllistfile.close()
hdjbjs.write("</xml>")
hdjbjs.close()