感想
這是自己的第一個(gè)針對(duì)真實(shí)網(wǎng)頁(yè)的爬蟲(chóng),寫(xiě)完真是滿滿的自豪感。從來(lái)沒(méi)有想過(guò),在這么短的時(shí)間里,可以那么快學(xué)會(huì)如何寫(xiě)爬蟲(chóng)。雖然現(xiàn)在寫(xiě)的還很簡(jiǎn)單粗糙,但會(huì)一直努力下去。
代碼
from bs4 import BeautifulSoup
import requests
import json
import random
def gender_finder(lorder_genders_raw):
    """Map each landlord avatar tag to a gender string.

    The site marks male landlords with the CSS class ``member_ico``;
    any other class on the badge element is treated as female.

    The original version returned a single string (derived from only one
    element), which the caller then ``zip()``-ed — iterating the
    *characters* ``'m','a','l','e'`` instead of one gender per landlord.
    Returning a list, one entry per tag, fixes that pairing.

    :param lorder_genders_raw: iterable of tag-like objects whose
        ``.get('class')`` returns the element's class list.
    :return: list of ``'male'`` / ``'female'``, one per input tag.
    """
    genders = []
    for tag in lorder_genders_raw:
        # 'member_ico' is the male badge; everything else is female.
        if tag.get('class') == ['member_ico']:
            genders.append('male')
        else:
            genders.append('female')
    return genders
def get_information(web_data):
    """Extract one room listing from a xiaozhu.com detail-page response.

    :param web_data: a ``requests`` response object for a listing
        detail page (only its ``.text`` attribute is used).
    :return: dict with title / address / price / picture URLs / landlord
        info for the listing (detail pages carry exactly one), or an
        empty dict when the selectors matched nothing.
    """
    soup = BeautifulSoup(web_data.text, 'lxml')
    titles = soup.select('div.pho_info > h4 > em')
    addresses = soup.select('div.pho_info > p > span')
    prices = soup.select('div.day_l > span')
    room_pictures = soup.select('#curBigImage')
    lorder_pictures = soup.select('div.member_pic > a > img')
    lorder_genders_raw = soup.select('div.member_pic > div')
    lorder_names = soup.select('div.w_240 > h6 > a')
    # One gender per landlord badge ('member_ico' marks male), computed
    # inline so zip() below pairs one gender per listing instead of
    # iterating the characters of a single 'male'/'female' string.
    lorder_genders = ['male' if tag.get('class') == ['member_ico'] else 'female'
                      for tag in lorder_genders_raw]
    data = {}  # stays empty (instead of raising NameError) if nothing matched
    for title, address, price, room_picture, lorder_gender, lorder_picture, lorder_name in zip(
            titles, addresses, prices, room_pictures, lorder_genders, lorder_pictures, lorder_names):
        data = {
            'title': title.get_text(),
            'address': address.get_text().strip('\n '),
            'price': price.get_text(),
            'room_picture': room_picture.get('src'),
            'lorder_gender': lorder_gender,
            'lorder_name': lorder_name.get_text(),
            'lorder_picture': lorder_picture.get('src')
        }
    return data
def get_url(url_content):
    """Collect listing detail-page URLs from one search-results page.

    Fetches the page through a random proxy from the module-level
    ``ips`` pool.

    :param url_content: URL of a search-results page.
    :return: list of ``href`` strings, one per listing link.
    """
    content_data = requests.get(url_content, proxies=random.choice(ips), timeout=6)
    soup = BeautifulSoup(content_data.text, 'lxml')
    # The original rebuilt this exact list once per link inside a loop
    # (O(n^2) and pointless) — a single comprehension is enough.
    return [link.get('href') for link in soup.select('#page_list > ul > li > a')]
# Build the proxy pool: each line of the remote file is one JSON-encoded
# proxy mapping, usable directly as the ``proxies=`` argument of requests.
# timeout=6 matches the other requests in this script so a dead host
# cannot hang the whole run.
resp = requests.get("http://tor1024.com/static/proxy_pool.txt", timeout=6)
ips_txt = resp.text.strip().split("\n")
ips = []
for line in ips_txt:
    try:
        ips.append(json.loads(line))
    except ValueError as e:
        # json.JSONDecodeError is a ValueError; skip malformed lines
        # (best-effort, as before) but report what was wrong.
        print(e)
# Crawl the first 10 search-result pages, then scrape every listing found.
url_contents = ['http://bj.xiaozhu.com/search-duanzufang-p{}-0/'.format(i) for i in range(1, 11)]
for url_content in url_contents:
    urls = get_url(url_content)
    for url in urls:
        try:
            web_data = requests.get(url, proxies=random.choice(ips), timeout=6)
        except Exception as e:
            # A single bad proxy or dead listing should not kill the
            # whole crawl; report it (matching the script's print style)
            # and move on to the next listing.
            print(e)
            continue
        data = get_information(web_data)
        print(data)
總結(jié)
- 爬網(wǎng)頁(yè)一定要記得用代理,用代理,用代理
- 可以用.strip去掉爬取的信息中不必要的部分
- 用函數(shù)的方式,把程序分成小塊寫(xiě),會(huì)比較容易