The page structures have changed from before and I'm not yet very familiar with the new ones. The code still needs polishing; issue log:
- scraping the content of Tencent News second-level pages is problematic
- the Lianjia request headers must actually be passed to requests.get via the headers= argument, otherwise they go unused
Scrape the response headers of one Tencent News page and save them to a txt file. Requirements:
- include the page URL
- include the title
- include all headers info
import requests
from bs4 import BeautifulSoup

u = 'http://news.qq.com/a/20170205/023923.htm'
r = requests.get(url=u)
headers = r.headers                 # the response headers
#print(headers)
soup = BeautifulSoup(r.text, 'lxml')
title = soup.title.text             # the page title
#print(title)
f = open('C:\\Users\\Administrator\\Desktop\\lianxi\\header.txt', 'w', encoding='utf8')
f.seek(0)
# create the txt file
f.write('爬取網頁:' + str(u) + '\n')
f.write('新聞標題為:' + title + '\n')
for i in headers:
    lst = [i, ':', headers[i], '\n']
    f.writelines(lst)
f.close()
print('finished!')
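As an aside, the open/seek/close pattern used in these scripts can also be written with a "with" block, which closes the file automatically even if a write fails partway. A minimal sketch of the same header dump (writing to header.txt in the current directory rather than the desktop path above):

import requests

u = 'http://news.qq.com/a/20170205/023923.htm'
r = requests.get(url=u)
# 'with' closes the file automatically, so no explicit f.close() is needed
with open('header.txt', 'w', encoding='utf8') as f:
    f.write('爬取網頁:' + u + '\n')
    for k, v in r.headers.items():   # iterate over header name/value pairs directly
        f.write(k + ':' + v + '\n')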
Scrape the titles of one category of news on the Tencent News site for a given day. Requirements:
- the file begins with 'XX年XX月XX日騰訊新聞' (the date plus 'Tencent News')
- include each news title and its URL
(also scrape the content of each news item from its second-level page)
import requests
from bs4 import BeautifulSoup
import re

u = 'http://news.qq.com/world_index.shtml'
r = requests.get(url=u)
soup = BeautifulSoup(r.text, 'lxml')
f = open('C:\\Users\\Administrator\\Desktop\\lianxi\\news.txt', 'w', encoding='utf8')
f.seek(0)
# create the txt file
f.write('2018年8月26日騰訊新聞\n')
news = soup.find_all('a', href=re.compile('http://news.qq.com/a/20180825/'))
#print(news)
for i in news:
    #print(i)
    txt = i.text.strip()            # strip() removes leading/trailing whitespace
    if txt == '':
        continue
    else:
        lst = [txt, ',', 'url=', i.attrs['href'], '\n']
        f.writelines(lst)
f.close()
print('finished!')
Revised: the index page's structure changed, so the article links now live under new.qq.com/omn and are protocol-relative (they begin with //); the match pattern changes accordingly and 'http:' has to be prepended to each href.
import requests
from bs4 import BeautifulSoup
import re

u = 'http://news.qq.com/world_index.shtml'
r = requests.get(url=u)
soup = BeautifulSoup(r.text, 'lxml')
f = open('C:\\Users\\Administrator\\Desktop\\lianxi\\news.txt', 'w', encoding='utf8')
f.seek(0)
# create the txt file
f.write('2018年8月26日騰訊新聞\n')
news = soup.find_all('a', href=re.compile('//new.qq.com/omn/20180826'))
#print(news)
for i in news:
    #print(i)
    txt = i.text.strip()            # strip() removes leading/trailing whitespace
    if txt == '':
        continue
    else:
        # hrefs are protocol-relative, so prepend 'http:'
        lst = [txt, ',', 'url=', 'http:', i.attrs['href'], '\n']
        f.writelines(lst)
f.close()
print('finished!')
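Instead of hand-prepending 'http:', urllib.parse.urljoin from the standard library resolves protocol-relative, root-relative, and absolute hrefs uniformly against the page URL. A small sketch (the example hrefs are made-up values in the formats seen above):

from urllib.parse import urljoin

base = 'http://news.qq.com/world_index.shtml'
# protocol-relative href: inherits the scheme of the base URL
print(urljoin(base, '//new.qq.com/omn/20180826/20180826A0KJ00.html'))
# -> http://new.qq.com/omn/20180826/20180826A0KJ00.html
# root-relative href: resolved against the base URL's host
print(urljoin(base, '/a/20180826/023923.htm'))
# -> http://news.qq.com/a/20180826/023923.htm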
Adding the article body text:
import requests
from bs4 import BeautifulSoup
import re

u = 'http://news.qq.com/world_index.shtml'
r = requests.get(url=u)
soup = BeautifulSoup(r.text, 'lxml')
f = open('C:\\Users\\Administrator\\Desktop\\lianxi\\news2.txt', 'w', encoding='utf8')
f.seek(0)
# create the txt file
f.write('2018年8月26日騰訊新聞\n')
news = soup.find_all('a', href=re.compile('http://news.qq.com/a/2018'))
#print(news)
for i in news:
    #print(i)
    txt = i.text.strip()            # strip() removes leading/trailing whitespace
    if txt == '':
        continue
    ul = i.attrs['href']
    ur = requests.get(url=ul)
    usoup = BeautifulSoup(ur.text, 'lxml')
    if usoup.body.attrs.get('id') == 'P-QQ':   # skip photo-gallery news
        continue
    main = usoup.find('div', id='Cnt-Main-Article-QQ')
    if main is None:                # second-level page has a different layout, skip it
        continue
    f.write(txt + '\n')
    f.write('正文如下:\n')
    for p in main.find_all('p'):    # the body paragraphs
        print(p.text)
        f.write(p.text + '\n')
    f.write('\n')
f.close()
print('finished!')
Good habits and logic for a crawler
- write the scraping steps as functions
- access the site the way a browser does, by sending headers info:
r = requests.get(url='...', headers={...})
This requests the page in the guise of a browser; a minimal example follows.
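(A sketch; the User-Agent value here is just an illustrative string, and any real one copied from your own browser's developer tools will do.)

import requests

# pose as a desktop browser; copy a real User-Agent from the
# Network tab of your browser's developer tools
h = {'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36'}
r = requests.get(url='http://news.qq.com/world_index.shtml', headers=h)
print(r.status_code)   # 200 means the request went through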
Header info: turning the headers copied from the browser into a dict.
headers = input("粘貼頭部信息:")   # note: input() reads a single line of text
lst = headers.split('\n')
m = []
for i in lst:
    key = i.split(':', 1)[0]        # split on the first ':' only,
    value = i.split(':', 1)[1]      # since values such as URLs contain ':'
    m.append([key.strip(), value.strip()])
print(dict(m))
def header_format(h):
    """
    Convert raw page headers text into a dict.
    h: the pasted headers text
    """
    lst = h.split('\n')
    m = []
    for i in lst:
        key = i.split(':', 1)[0]
        value = i.split(':', 1)[1]
        m.append([key.strip(), value.strip()])
    return dict(m)

print(header_format(headers))
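A quick check of why the split is limited to the first colon: a Referer value itself contains '://', so an unrestricted split would truncate it. (The sample text below is made up.)

sample = 'Host: news.qq.com\nReferer: http://news.qq.com/world_index.shtml'
print(header_format(sample))
# {'Host': 'news.qq.com', 'Referer': 'http://news.qq.com/world_index.shtml'}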
Advantages of the function-based style:
- more readable
- functions can be reused
- easier to modify
Scrape the headers of one Tencent News page and save them to txt, this time written with functions (a sketch follows the requirements). Requirements:
- include the page URL
- include the title
- include all headers info
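A minimal functional sketch of the exercise, under the same assumptions as the first script; the names fetch_page and save_header_info are mine, not from the course:

import requests
from bs4 import BeautifulSoup

def fetch_page(u, h=None):
    """Request the page u, optionally with headers h, and return the response."""
    return requests.get(url=u, headers=h)

def save_header_info(r, path):
    """Write the page URL, title, and all response headers of r into path."""
    soup = BeautifulSoup(r.text, 'lxml')
    with open(path, 'w', encoding='utf8') as f:
        f.write('爬取網頁:' + r.url + '\n')
        f.write('新聞標題為:' + soup.title.text + '\n')
        for k, v in r.headers.items():
            f.write(k + ':' + v + '\n')

if __name__ == '__main__':
    save_header_info(fetch_page('http://news.qq.com/a/20170205/023923.htm'),
                     'header.txt')
    print('finished!')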
Scraping Lianjia second-hand housing data for Shenzhen
import requests
from bs4 import BeautifulSoup
import re

def url_analysis(u, h, s, n):
    '''
    Analyse the listing pages and return a list of second-level (detail-page) URLs.
    u: start URL
    h: request headers
    s: a fragment that the second-level URLs must contain
    n: number of listing pages to walk
    '''
    url_lst = []
    for i in range(1, n+1):
        if i == 1:
            r = requests.get(url=u+'nb1rs深圳/', headers=h)
        else:
            r = requests.get(url=u+'pg'+str(i)+'nb1rs深圳/', headers=h)
        soup = BeautifulSoup(r.text, 'lxml')
        r2 = soup.find_all('a', href=re.compile(s))
        for j in r2:
            r3 = j.attrs['href']
            url_lst.append(r3)
    return url_lst

def content(u, h):
    '''
    Scrape the field values of one detail page.
    u: the second-level URL to scrape
    h: request headers
    '''
    r = requests.get(url=u, headers=h)
    r.encoding = 'utf-8'
    soup = BeautifulSoup(r.text, 'lxml')
    t = soup.title.text   # the listing title
    toprice = soup.find('div', class_='price').find('span', class_='total').text
    unprice = soup.find('div', class_='unitPrice').find('span', class_='unitPriceValue').text
    area = soup.find('div', class_='area').find('div', class_='mainInfo').text
    base = soup.find('div', class_='base').find('div', class_='content').find_all('li')
    year = base[-1].text
    pattern = 'resblockPosition:\'(.*?)\','   # .*? matches any characters, non-greedy
    position = re.search(pattern, r.text).group(1)
    lng = position.split(',')[0]
    lat = position.split(',')[1]
    return [t, ',', toprice, ',', unprice, ',', area, ',', year, ',', lng, ',', lat, '\n']

if __name__ == '__main__':   # entry point
    web_u = 'https://sz.lianjia.com/ershoufang/'
    web_h = {
        'Accept': 'text/html,application/xhtml+xml,application/xml;q=0.9,image/webp,image/apng,*/*;q=0.8',
        'Accept-Encoding': 'gzip, deflate, br',
        'Accept-Language': 'zh-CN,zh;q=0.9',
        'Connection': 'keep-alive',
        'Cookie': 'TY_SESSION_ID=93f5b43a-5dc9-4d96-b57a-a4eb78f8dc15; lianjia_uuid=614ed9e0-dc25-421f-ba8b-141c574dbb47; _smt_uid=5b80defd.8430805; UM_distinctid=1656f670d3e4ff-02814a7ed21053-b34356b-1fa400-1656f670d3fdd7; _jzqx=1.1535172349.1535172349.1.jzqsr=bj%2Elianjia%2Ecom|jzqct=/.-; _ga=GA1.2.50227061.1535172352; ljref=pc_sem_baidu_ppzq_x; lianjia_ssid=dbe87b29-353a-45c2-97cf-aae666e2771b; Hm_lvt_9152f8221cb6243a53c83b956842be8a=1535172349,1535201139,1535358484; _jzqa=1.3976151446564617700.1535172349.1535201139.1535358484.3; _jzqc=1; _jzqy=1.1535201139.1535358484.1.jzqsr=baidu|jzqct=%E9%93%BE%E5%AE%B6%E7%BD%91.-; _jzqckmp=1; _gid=GA1.2.1182771159.1535358486; select_city=440300; all-lj=c32edd623b8a5a59c7de54c92107bb6c; _qzjc=1; CNZZDATA1255849469=275538323-1535355329-%7C1535355329; CNZZDATA1254525948=1806440598-1535354494-%7C1535354494; CNZZDATA1255633284=72361912-1535358081-%7C1535358081; CNZZDATA1255604082=1229464985-1535356409-%7C1535356409; Hm_lpvt_9152f8221cb6243a53c83b956842be8a=1535359605; _qzja=1.1736056849.1535358739249.1535358739249.1535358739249.1535359600160.1535359605575.0.0.0.10.1; _qzjb=1.1535358739249.10.0.0.0; _qzjto=10.1.0; _jzqb=1.15.10.1535358484.1',
        'Host': 'sz.lianjia.com',
        'Referer': 'https',
        'Upgrade-Insecure-Requests': '1',
        'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/65.0.3325.181 Safari/537.36'
    }
    web_s = 'https://sz.lianjia.com/ershoufang/105'
    web_n = 3
    f = open('C:\\Users\\Administrator\\Desktop\\lianxi\\lianjia.txt', 'w', encoding='utf8')
    f.seek(0)
    f.write('title,total_price萬元,unprice元/平方米,area平方米,產權年限,lng,lat\n')
    for i in url_analysis(web_u, web_h, web_s, web_n):
        data = content(i, web_h)
        f.writelines(data)
        print(data)
    f.close()
    print('finished!')
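One caveat about joining fields with literal commas as above: listing titles often contain commas themselves, which silently misaligns the columns. The standard csv module quotes such fields automatically; a minimal sketch (the sample row is made up, shaped like what content() returns once its ',' and '\n' separator items are dropped):

import csv

# made-up example row: title, total price, unit price, area, year, lng, lat
row = ['滿五唯一,南北通透', '500萬', '65000元/平米', '76.8平米', '2004年建', '114.05', '22.54']

with open('lianjia.csv', 'w', newline='', encoding='utf8') as f:
    w = csv.writer(f)
    w.writerow(['title', 'total_price', 'unit_price', 'area', 'year', 'lng', 'lat'])
    w.writerow(row)   # the comma inside the title is quoted, not treated as a separator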