author:艾莉亞·史塔克
【某某】
作為本公眾號的忠實讀者,你一定在想這個號到底是干嘛的,扯淡的嗎?
當(dāng)然不是,
扯淡是不可能的,
這輩子都不可能扯淡!
更何況
扯淡這個主題也不可以作為公號的主題
所以
今天來點實用干貨
如果你經常碰到一些收集信息的瑣碎任務,那么可以考慮化繁為簡,用爬蟲工具來替代手工勞動。
本文就是這樣一個例子:爬取信用信息公示系統中,企業信息詳情。
如下圖所示:
首先確定lxml規(guī)則
xpath finder
插件會直觀顯示匹配結(jié)果。
然后在 IPython 中驗證
In [1]: import requests
In [2]: from lxml import html
In [3]: resp=requests.get('http://www.sdsszt.com/GSpublicity/GSpublicityList.html?service=entInfo_QuIz54WYBCp98MAnDE+TOjSI6nj4d
...: DhPid4wNzIOjLyqVswLC8L8we/iqFGcaayM-q1d+FAeb99tNXz0PkuiXwA==&localSetting=sd&from=singlemessage')
In [4]: text=resp.content.decode('utf-8')
In [7]: root=html.fromstring(text)
In [21]: root.findall('.//tr/td/span[@class=\'label\']')[0].xpath('text()')
Out[21]: ['統(tǒng)一社會信用代碼/注冊號:']
In [22]: root.findall('.//table//tr/td/span[@class=\'label\']')[0].xpath('text()')
Out[22]: ['統(tǒng)一社會信用代碼/注冊號:']
In [23]: root.findall('.//table//tr/td/span[@class=\'content\']')[0].xpath('text()')
Out[23]: ['914406063454106971']
動手寫腳本,一氣呵成
# encoding: utf-8
__author__ = 'fengshenjie'
import requests
from lxml import html
import json
import csv, random
# Scraper configuration: seed URLs and raw request-header templates.
conf = {
    # Entry pages of the credit-information publicity system to crawl.
    'start_url': [
        'http://www.sdsszt.com/GSpublicity/GSpublicityList.html?service=entInfo_QuIz54WYBCp98MAnDE+TOjSI6nj4dDhPid4wNzIOjLyqVswLC8L8we/iqFGcaayM-q1d+FAeb99tNXz0PkuiXwA==&localSetting=sd&from=singlemessage'
    ],
    # Header blocks copied verbatim from browser dev tools; getHeader()
    # picks one at random and parses it into a dict.
    'raw_headers': ['''Host: www.sdsszt.com
Connection: keep-alive
Upgrade-Insecure-Requests: 1
User-Agent: Mozilla/5.0 (Macintosh; Intel Mac OS X 10_11_6) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/66.0.3359.117 Safari/537.36
Accept: text/html,application/xhtml+xml,application/xml;q=0.9,image/webp,image/apng,*/*;q=0.8
Accept-Encoding: gzip, deflate
Accept-Language: zh-CN,zh;q=0.9,en;q=0.8,zh-TW;q=0.7,da;q=0.6
''']
}
def getHeader(raw_headers=None):
    """Pick a random raw header block and parse it into a requests-style dict.

    Args:
        raw_headers: optional list of raw header strings (one block per
            element, "Name: value" lines separated by newlines). Defaults
            to conf['raw_headers'].

    Returns:
        dict: header name -> header value, both whitespace-stripped.
    """
    if raw_headers is None:
        raw_headers = conf['raw_headers']
    raw = random.choice(raw_headers)
    headers = {}
    for line in raw.split('\n'):
        line = line.strip()
        if not line:
            continue  # skip blank/trailing lines instead of logging an error
        try:
            # maxsplit=1: header values may themselves contain ':'
            key, value = line.split(':', 1)
        except ValueError as e:
            print(e, line)
            continue
        headers[key.strip()] = value.strip()
    return headers
def downloader(url, timeout=30):
    """Fetch *url* and return the response body decoded as UTF-8.

    Args:
        url: page to download.
        timeout: seconds before the request is aborted; without it
            requests can block forever on a stalled connection.

    Returns:
        str: the decoded HTML source.

    Raises:
        requests.HTTPError: on a non-2xx response.
        requests.RequestException: on connection/timeout failures.
    """
    # Use the randomized browser headers from conf — they were built by
    # getHeader() but never attached to the request in the original code.
    resp = requests.get(url, headers=getHeader(), timeout=timeout)
    resp.raise_for_status()
    # NOTE(review): the site is assumed to serve UTF-8 — confirm; otherwise
    # resp.encoding / resp.apparent_encoding would be safer.
    return resp.content.decode('utf-8')
def parser(text):
    """Extract label/content span pairs from a company-detail page.

    Args:
        text: HTML source of the page as a str.

    Returns:
        list[dict]: [{'label': ..., 'content': ...}, ...]; the same list
        is also handed to outputer() for CSV export.
    """
    if not isinstance(text, str):
        raise TypeError('text must be a str')
    root = html.fromstring(text)
    labels = root.findall('.//tr/td/span[@class=\'label\']')
    contents = root.findall('.//tr/td/span[@class=\'content\']')
    if len(labels) != len(contents):
        # Page layout changed or page truncated: pair what we can instead
        # of aborting the whole run (the original assert crashed here).
        print('warning: %d labels vs %d contents' % (len(labels), len(contents)))
    res = []
    for label_el, content_el in zip(labels, contents):
        label_text = label_el.xpath('text()')
        content_text = content_el.xpath('text()')
        # A span may have no text node at all — fall back to '' instead of
        # raising IndexError on [0].
        res.append({
            'label': label_text[0].replace('\r\n', '').strip() if label_text else '',
            'content': content_text[0].strip() if content_text else '',
        })
    outputer(res)
    return res
def outputer(res, fname='./shunde.csv'):
    """Echo parsed rows to stdout and write them to a CSV file.

    Args:
        res: list of dicts with 'label' and 'content' keys.
        fname: output CSV path. utf-8-sig adds a BOM so Excel detects
            the encoding of the Chinese text correctly.

    Raises:
        TypeError: if res is not a list (explicit check — the original
            used assert, which is stripped under ``python -O``).
    """
    if not isinstance(res, list):
        raise TypeError('res must be a list of dicts')
    rows = []
    for d in res:
        print(d['label'], d['content'])
        rows.append((d['label'], d['content']))
    # newline='' is required by the csv module; without it every row is
    # followed by a blank line on Windows.
    with open(fname, 'w', encoding='utf-8-sig', newline='') as f:
        csv.writer(f).writerows(rows)
def main():
    """Crawl every configured start URL and export its details to CSV."""
    for target in conf['start_url']:
        print('->', target)
        page = downloader(target)
        parser(page)
# Script entry point: run the crawl only when executed directly,
# not when imported as a module.
if __name__ == '__main__':
    main()
這是我們最后輸出的文件: