Results
Based on a keyword file (one keyword per line), find the second-level domains that rank in Baidu's top ten for each keyword.
If the input keywords cover a whole industry, this is a fairly quick way to surface the SEO competitors, which also works as a rather crude form of competitor monitoring (a small aggregation sketch after Code 1 shows one way to turn the raw output into a ranked competitor list).
Code 1
Directly scrape the display URLs that appear in the search results.
(screenshot: 采集1.png)
#!/usr/bin/env python
# -*- coding: utf-8 -*-
#encoding=utf-8
from __future__ import division
import time
import requests  # for fetching web pages; very simple to use, but needs to be installed separately (installation is easy)
import re
from bs4 import BeautifulSoup
kws=open(r'C:\Users\Administrator\Desktop\keywords.txt','r') # path can be changed; the file must contain no blank lines; this is the input keyword file
morekwsall=open(r'C:\Users\Administrator\Desktop\jzdsbs4.txt','w+',encoding='utf-8') # mind the encoding
headers1={
'Accept':'text/html,application/xhtml+xml,application/xml;q=0.9,*/*;q=0.8',
'Accept-Encoding':'gzip,deflate',
'User-Agent':'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_9_0) AppleWebKit/537.36 (KHTML,like Gecko) Chrome/31.0.1650.63 Safari/537.36',
'Referer':'http://www.hao123.com/',
'Cache-Control':'max-age=0',
'Cookie':'BAIDUID=8A69D49EC2D3B576D17277579AEDA83F:FG=1; BIDUPSID=8A69D49EC2D3B576D17277579AEDA83F; PSTM=1463384511; ' \
'BDUSS=2ZESWpnckRaeHRkVjdqN3dJa0RINWNrSDJTbm1sRFVlRUMzelNCVTBwSEliY0ZYQVFBQUFBJCQAAAAAAAAAAAEAAAAVcMhIYWFzZDEzM' \
'jM0AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAMjgmVfI4JlXY; ' \
'uc_login_unique=f62bbe2bfc4a840aa437e67dcde239a3; BD_HOME=1; BDRCVFR[S4-dAuiWMmn]=I67x6TjHwwYf0; B64_BOT=1; ' \
'BD_UPN=12314353; sug=3; sugstore=1; ORIGIN=2; bdime=0; ' \
'BDRCVFR[feWj1Vr5u3D]=I67x6TjHwwYf0; BD_CK_SAM=1; H_PS_PSSID=1428_18280_21116_20592_21189_21160_20929; BDSVRTM=0'
}
kwsline=kws.readlines()
for kw in kwsline:
    utfline = kw.strip()
    utzfline = utfline
    time.sleep(2)  # still best not to hit Baidu too hard
    o = 0
    try:
        urlbaidu1 = str('http://www.baidu.com/baidu?wd='+utzfline+'&tn=monline_4_dg')
        rbaidu1 = requests.get(urlbaidu1, headers=headers1, allow_redirects=False)
        print(rbaidu1.status_code)
        soup = BeautifulSoup(rbaidu1.content, 'lxml')
        result_urls = soup.select('.c-showurl')  # the green display URL shown under each result
        for result_url in result_urls:
            # keep only the domain part of the display URL, strip whitespace and truncation dots
            final_result = result_url.get_text().split('/')[0].split('-')[0].replace('\n', '')\
                .replace(' ', '').replace('...', '')
            print(final_result)
            morekwsall.write(final_result+'\n')
    except Exception as e:
        o = o+1
        print(e)
        time.sleep(100)      # back off after an error
        kwsline.append(kw)   # push the failed keyword back onto the list to retry it later
    finally:
        print(o)
kws.close()
morekwsall.close()
print(u'結(jié)束!!!')
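Once Code 1 has finished, the output file holds one display domain per top-ten result per keyword. To turn that raw list into the competitor view mentioned at the top, one option is to count how often each domain appears across all keywords. The aggregation step below is not part of the original script; it is only a minimal sketch that assumes the jzdsbs4.txt file written above:

```python
from collections import Counter

# Count how often each domain appears across all keywords' top-ten results;
# the domains that keep showing up are the SEO competitors worth watching.
with open(r'C:\Users\Administrator\Desktop\jzdsbs4.txt', encoding='utf-8') as f:
    domains = [line.strip() for line in f if line.strip()]

for domain, hits in Counter(domains).most_common(20):
    print(domain, hits)
```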
Note:
When a request errors, the script waits 100 s and pushes the failed keyword back onto the list so it gets scraped again.
For example, if keyword 50 errors, it waits 100 s and then collects keyword 50 again (a bounded-retry variant is sketched after the excerpt below).
except Exception as e:
    o = o+1
    print(e)
    time.sleep(100)
    kwsline.append(kw)
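One side effect of re-appending the failed keyword is that a keyword that keeps failing will be retried forever. The original script does not guard against this; the snippet below is only a hypothetical sketch of how a retry cap could be added (MAX_RETRIES and the retries dict are made-up names, not part of the script above):

```python
import time

MAX_RETRIES = 3   # hypothetical cap on retries per keyword, not in the original script
retries = {}      # hypothetical per-keyword failure counter

kwsline = ['keyword a', 'keyword b']  # stand-in for kws.readlines()
for kw in kwsline:
    try:
        pass  # fetch and parse the SERP here, as in Code 1
    except Exception as e:
        retries[kw] = retries.get(kw, 0) + 1
        if retries[kw] <= MAX_RETRIES:
            time.sleep(100)       # back off, then re-queue the keyword
            kwsline.append(kw)
        else:
            print('giving up on', kw)
```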
Code 2:
Get the actual landing URL behind the Baidu redirect link of each search result.
![采集3.png](http://upload-images.jianshu.io/upload_images/3117575-c689bf3ff3887ee1.png?imageMogr2/auto-orient/strip%7CimageView2/2/w/1240)
#!/usr/bin/env python
# -*- coding: utf-8 -*-
#encoding=utf-8
from __future__ import division
import time
import requests  # for fetching web pages; very simple to use, but needs to be installed separately (installation is easy)
import re
from bs4 import BeautifulSoup
from urllib import request
kws=open(r'C:\Users\Administrator\Desktop\keywords.txt','r') # path can be changed; the file must contain no blank lines; this is the input keyword file
morekwsall=open(r'C:\Users\Administrator\Desktop\jzds.txt','w+',encoding='utf-8') # mind the encoding
morekwserror=open(r'C:\Users\Administrator\Desktop\jzds_error.txt','w+',encoding='utf-8') # URLs that could not be resolved
headers1={
'Accept':'text/html,application/xhtml+xml,application/xml;q=0.9,*/*;q=0.8',
'Accept-Encoding':'gzip,deflate',
'User-Agent':'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_9_0) AppleWebKit/537.36 (KHTML,like Gecko) Chrome/31.0.1650.63 Safari/537.36',
'Referer':'http://www.hao123.com/',
'Cache-Control':'max-age=0',
'Cookie':'BAIDUID=8A69D49EC2D3B576D17277579AEDA83F:FG=1; BIDUPSID=8A69D49EC2D3B576D17277579AEDA83F; PSTM=1463384511; ' \
'BDUSS=2ZESWpnckRaeHRkVjdqN3dJa0RINWNrSDJTbm1sRFVlRUMzelNCVTBwSEliY0ZYQVFBQUFBJCQAAAAAAAAAAAEAAAAVcMhIYWFzZDEzM' \
'jM0AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAMjgmVfI4JlXY; ' \
'uc_login_unique=f62bbe2bfc4a840aa437e67dcde239a3; BD_HOME=1; BDRCVFR[S4-dAuiWMmn]=I67x6TjHwwYf0; B64_BOT=1; ' \
'BD_UPN=12314353; sug=3; sugstore=1; ORIGIN=2; bdime=0; ' \
'BDRCVFR[feWj1Vr5u3D]=I67x6TjHwwYf0; BD_CK_SAM=1; H_PS_PSSID=1428_18280_21116_20592_21189_21160_20929; BDSVRTM=0'
}
kwsline=kws.readlines()
for kw in kwsline:
    utfline = kw.strip()
    utzfline = utfline
    urlbaidu1 = str('http://www.baidu.com/baidu?wd='+utzfline+'&tn=monline_4_dg')
    try:
        rbaidu1 = requests.get(urlbaidu1, headers=headers1)
        soup = BeautifulSoup(rbaidu1.content, 'lxml')
        result_urls = soup.select('div > h3 > a')  # result title links, which point at Baidu's redirect URLs
        for result_url in result_urls:
            #time.sleep(0.5)  # still best not to hit Baidu too hard
            try:
                with request.urlopen(result_url.get('href')) as f:
                    real_url = f.geturl()  # can fail with an access error, so resolving the real URL fails; how to still get it on failure?
            except Exception as e:
                print("@@@")
                print(result_url)
                morekwserror.write(str(e) + ':' + str(result_url) + '\n')
                print(e)
                real_url = '///'  # placeholder so the split below yields an empty host
                print(real_url.split('/')[2])
            finally:
                real_url2 = real_url.split('/')[2]  # host part of the final URL
                print(real_url2)
                morekwsall.write(real_url2 + '\n')
    except Exception as e:
        print(e)
        print("!!!!")
        time.sleep(50)       # back off after an error
        kwsline.append(kw)   # push the failed keyword back onto the list to retry it later
kws.close()
morekwsall.close()
morekwserror.close()
print(u'結(jié)束!!!')
There is one extra try here because request.urlopen and f.geturl() throw when the final page comes back with a 403 or 404 status code.
I wanted to at least record those error URLs as well, but that attempt failed, so for now they are simply skipped (a possible fix is sketched below).
For the second (inner) try, when result 50 errors, collection simply moves on to result 51.
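In fact the final URL is usually still recoverable, because urlopen has already followed the Baidu redirect before the 403/404 is raised. The sketch below is not part of the original script; it assumes Python 3's urllib, where urllib.error.HTTPError carries the failing (already redirected) URL on its url attribute:

```python
from urllib import request, error

def resolve_final_host(href, timeout=10):
    """Follow Baidu's redirect and return the host of the final URL,
    even when the target page answers with 403/404."""
    try:
        with request.urlopen(href, timeout=timeout) as f:
            final_url = f.geturl()
    except error.HTTPError as e:
        # The redirect was already followed; the failing final URL sits on the error object.
        final_url = e.url
    except error.URLError:
        return None  # network-level failure: nothing to recover
    return final_url.split('/')[2]
```

With a helper like this, the inner try in Code 2 could write the host even for 403/404 pages instead of skipping them.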