Scrape the first three pages of Shanghai rental listings from Xiaozhu (小豬短租) and store the data in MongoDB.

The code is as follows:
```python
from bs4 import BeautifulSoup
import requests
import time
import pymongo


def get_detail_info(url):
    # Scrape a single listing: title, address, daily rent, image,
    # landlord avatar, landlord name, landlord gender
    wb_data = requests.get(url)
    soup = BeautifulSoup(wb_data.text, 'lxml')
    time.sleep(2)  # pause between requests to be polite to the site
    title = soup.select('h4 > em')[0].get_text()
    address = soup.select('span.pr5')[0].get_text()
    rent = soup.select('div.day_l > span')[0].get_text()
    image = soup.select('#curBigImage')[0].get('src')
    lorder_pic = soup.select('div.member_pic > a > img')[0].get('src')
    lorder_name = soup.select('a.lorder_name')[0].get_text()
    # .get('class') returns a list of class names, e.g. ['member_boy_ico']
    lorder_sex = soup.select('#floatRightBox > div.js_box.clearfix > div.w_240 > h6 > span')[0].get('class')

    def get_gender(class_name):
        # The gender icon carries the class 'member_boy_ico' for male landlords;
        # class_name is a list, so membership (not equality) must be tested
        if 'member_boy_ico' in class_name:
            return '男'
        else:
            return '女'

    data = {
        '標(biāo)題': title,
        '地址': address,
        '日租金': rent,
        '圖片': image,
        '房東頭像': lorder_pic,
        '房東姓名': lorder_name,
        '房東性別': get_gender(lorder_sex)
    }
    print(data)
    return data


def get_all_data(urls):
    # Scrape every listing linked from the given list pages
    all_data = []
    for url in urls:
        wb_data = requests.get(url)
        soup = BeautifulSoup(wb_data.text, 'lxml')
        links = soup.select('#page_list > ul > li > a')
        for link in links:
            href = link.get('href')
            all_data.append(get_detail_info(href))
    return all_data


# Set up the database
client = pymongo.MongoClient('localhost', 27017)
rent_info = client['rent_info']          # name the database
sheet_table = rent_info['sheet_table']   # create the collection

# Links to the first 3 pages of listings
urls = ['http://sh.xiaozhu.com/search-duanzufang-p{}-0/'.format(i) for i in range(1, 4)]

datas = get_all_data(urls)
for item in datas:
    # Insert each record into the database
    sheet_table.insert_one(item)

# for item in sheet_table.find():
#     # Filter out listings whose daily rent is at least 500 and print them
#     if int(item['日租金']) >= 500:
#         print(item)
```
Run screenshot (image omitted).
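One wrinkle in the script above: '日租金' is stored as the raw string scraped from the page, which is why the commented-out filter has to call int() in Python. A minimal alternative sketch, assuming the rent always renders as a plain integer, casts it once at insert time so MongoDB can compare it numerically:

```python
# Sketch: cast the daily rent to an integer before inserting, so numeric
# range queries work directly in MongoDB.
# Assumes the scraped '日租金' string is always a plain integer like '458'.
for item in datas:
    item['日租金'] = int(item['日租金'])
    sheet_table.insert_one(item)
```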
Summary:
In Python, the pymongo library is used to operate MongoDB; when initializing, you define the database name and the collection name.
Filtering inside the database uses the comparison operators $lt, $lte, $gt, and $gte.
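For example, with '日租金' stored as a number (see the sketch above), the rent filter can be pushed into the query itself instead of being done in Python; a minimal sketch:

```python
# Find listings with a daily rent of at least 500, highest rent first.
# Assumes '日租金' was inserted as an int rather than a string.
for item in sheet_table.find({'日租金': {'$gte': 500}}).sort('日租金', -1):
    print(item)
```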