爬取58同城平板電腦頻道所有產(chǎn)品信息
效果是這樣的:
個人分類產(chǎn)品信息
商家分類產(chǎn)品信息
我的代碼:
import time

import requests
from bs4 import BeautifulSoup
def get_cate_lists():
    """Walk both listing categories and the first 10 pages of each,
    dispatching every listing-page URL to get_link_from().

    Category code 0 = personal sellers (個人), 1 = merchants (商家).
    """
    for cate in range(0, 2):       # 0: personal, 1: merchant
        for page in range(1, 11):  # scrape the first 10 pages of each category
            # Build the page URL directly; the original wrapped it in a
            # one-element list and indexed [0], which was a no-op.
            url = 'http://bj.58.com/pbdn/{}/pn{}/'.format(cate, page)
            get_link_from(url)
def get_link_from(url):
    """Fetch one listing page and forward each item's detail URL to
    get_detail_info(), with any query string removed."""
    response = requests.get(url)
    page_soup = BeautifulSoup(response.text, 'lxml')
    # Item links on the listing page carry class="t".
    for anchor in page_soup.select('a[class="t"]'):
        detail_url = anchor.get('href').split('?')[0]
        get_detail_info(detail_url)
def get_views(url):
    """Query 58.com's counter service for a listing's page-view count.

    The info id is the last path segment of *url* with its trailing
    'x.shtml' removed (merchant URLs look like .../28519635853996x.shtml).

    Bug fixed: the original used str.strip('x.shtml'), which strips a
    *set* of characters ({x . s h t m l}) from BOTH ends rather than the
    literal suffix, and can corrupt ids; we now cut the suffix exactly.

    Returns the view count as a string (caller converts with int()).
    """
    info_id = url.split('/')[-1]
    if info_id.endswith('.shtml'):
        info_id = info_id[:-len('.shtml')]
    info_id = info_id.rstrip('x')  # merchant ids carry a literal trailing 'x'
    counter_url = 'http://jst1.58.com/counter?infoid={}'.format(info_id)
    # The counter endpoint validates the Referer against the listing URL.
    headers = {'Referer': url}
    web_data = requests.get(counter_url, headers=headers)
    # Response body ends in "...=<count>"; take the part after the last '='.
    views = web_data.text.split('=')[-1]
    return views
def get_detail_info(url):
    """Fetch one item detail page, parse its fields, and print the result.

    URL shapes (from the author's notes):
      merchant: http://bj.58.com/pingbandiannao/28519635853996x.shtml
      personal: http://zhuanzhuan.58.com/detail/744170697423355907z.shtml
    Anything else is treated as a promo page and skipped.
    """
    time.sleep(1)  # throttle requests so the site does not block us
    web_data = requests.get(url)
    soup = BeautifulSoup(web_data.text, 'lxml')
    # endswith() is clearer and safer than the original url[-7] indexing,
    # which would raise IndexError on a URL shorter than 7 characters.
    if url.endswith('x.shtml'):  # merchant listing
        info = {
            'title': soup.select('div.col_sub.mainTitle > h1')[0].get_text(),
            # Guard: some pages have no area element at all.
            'area': list(soup.select('span.c_25d')[0].stripped_strings)
                    if soup.find_all('span', 'c_25d') else None,
            'price': soup.select('span.price.c_f50')[0].text,
            'cate': '商家',
            'view': int(get_views(url)),
        }
        print(info)
    elif url.endswith('z.shtml'):  # personal (zhuanzhuan) listing
        info = {
            # NOTE: 'info_titile' is the site's own misspelled class name.
            'title': soup.select('h1.info_titile')[0].get_text(),
            'area': soup.select('div.palce_li > span > i')[0].text,
            'price': int(soup.select('div.price_li > span > i')[0].text),
            'cate': '個人',
            'view': int(soup.select('span.look_time')[0].text.strip('次瀏覽')),
        }
        print(info)
    else:
        print('過濾促銷產(chǎn)品')
# Script entry point: only crawl when run directly, not when imported.
if __name__=='__main__':
    get_cate_lists()
總結(jié):
- 掌握BeautifulSoup、requests模塊基本用法；
- 了解HTML、CSS的基礎(chǔ)語句，輔助進(jìn)行需求點(diǎn)的篩查；
- get()、get_text()、find_all()、stripped_strings方法的使用進(jìn)行數(shù)據(jù)清洗；
- for循環(huán)嵌套與列表推導(dǎo)式的靈活運(yùn)用；
- if條件語句的適當(dāng)運(yùn)用解決運(yùn)行報(bào)錯而無法進(jìn)行下去的問題