Getting started with BeautifulSoup: source code
Video link
Uses Python 2.7
In Chrome, right-click → Inspect to find the class or id of the content you need.
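For example, if DevTools shows the headline as <h1 class="main-title">, that class becomes the CSS selector passed to soup.select (a minimal sketch; the markup is an assumption about the target page):

    soup = BeautifulSoup(html, "lxml")
    title = soup.select(".main-title")[0].text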
#coding=utf-8
import requests                 # HTTP requests
from bs4 import BeautifulSoup   # HTML parsing
import json                     # JSON parsing
import re                       # regular expressions
import time                     # request throttling
import pandas                   # data analysis
import sqlite3                  # data storage
#newsurl = "http://news.sina.com.cn/china"
# Comment-count API endpoint (returns JSONP; {} is filled with the news id)
common_js = "http://comment5.news.sina.com.cn/page/info?version=1&format=json&channel=gn&newsid=comos-{}&group=undefined&compress=0&ie=utf-8&oe=utf-8&page=1&page_size=3&t_size=3&h_size=3&thread=1&callback=jsonp_1517920852693&_=1517920852693"

# Get the comment count for one article
def getNewsCommentCount(new_url):
    # news_id = sub_url.split("/")[-1].strip("doc-i").rstrip(".shtml")
    news_id = re.search("doc-i(.+).shtml", new_url).group(1)  # extract the news id with a regex
    res = requests.get(common_js.format(news_id))
    # slice off the JSONP wrapper "jsonp_1517920852693(...)" to get bare JSON
    response = res.text[len("jsonp_1517920852693("):-1]
    jd = json.loads(response)
    if jd.get("result") and jd["result"].get("count") and jd["result"]["count"].get("total"):
        return jd["result"]["count"]["total"]
    return 0
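# A quick sketch of the JSONP unwrapping above, using a made-up payload
# (payload shape assumed from the fields accessed in the function):
#   raw  = 'jsonp_1517920852693({"result": {"count": {"total": 42}}})'
#   body = raw[len("jsonp_1517920852693("):-1]    # '{"result": {"count": {"total": 42}}}'
#   json.loads(body)["result"]["count"]["total"]  # 42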
# Get the details of one article
def getNewsDetail(sub_new_url):
    result = {}
    res = requests.get(sub_new_url)
    # re-encode to fix mojibake; "lxml" is the parser type (html.parser also works)
    soup = BeautifulSoup(res.text.encode(res.encoding).decode('utf-8'), "lxml")
    result["articleUrl"] = sub_new_url
    result["articleTitle"] = soup.select(".main-title")[0].text  # title
    result["articleTime"] = soup.select(".date-source")[0].select("span")[0].text  # publish time
    result["articleContent"] = "\n".join([p.text.strip() for p in soup.select("#article p")[:-1]])  # body text (the last <p> is dropped)
    result["articleAuthor"] = soup.select(".show_author")[0].text  # author
    result["articleComment"] = getNewsCommentCount(sub_new_url)  # comment count
    if len(soup.select(".img_wrapper")) > 0:
        result["articlePicture"] = soup.select(".img_wrapper")[0].select("img")[0]["src"]  # first image
    return result
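# Shape of the dict returned above (values illustrative, URL hypothetical):
#   getNewsDetail("http://news.sina.com.cn/c/doc-iexample123.shtml")
#   -> {"articleUrl": "...", "articleTitle": "...", "articleTime": "...",
#       "articleContent": "...", "articleAuthor": "...", "articleComment": 42,
#       "articlePicture": "..."}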
# Paginated news-list API (returns JSONP; {} is filled with the page number)
page_common_url = "http://api.roll.news.sina.com.cn/zt_list?channel=news&cat_1=gnxw&cat_2==gdxw1||=gatxw||=zs-pl||=mtjj&level==1||=2&show_ext=1&show_all=1&show_num=22&tag=1&format=json&page={}&callback=newsloadercallback"
# Fetch one page of the news list and return the article URLs on it
def getPageDataList(pageIndex):
    page_url = page_common_url.format(pageIndex)  # fill in the page number
    page_url_request = requests.get(page_url)  # fetch the page
    # note: lstrip/rstrip remove character *sets*; this works because the JSON
    # body starts with "{" and ends with "}", neither of which is in the sets
    page_url_jd = json.loads(page_url_request.text.lstrip(" newsloadercallback(").rstrip(");"))
    sub_url_array = []
    for sub_item in page_url_jd["result"]["data"]:
        sub_url = sub_item["url"]
        if sub_url.find("http") != -1:  # keep only absolute http URLs
            sub_url_array.append(sub_url)
    return sub_url_array  # list of article URLs
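# Illustrative call (actual URLs depend on the live feed at crawl time):
#   getPageDataList(1) -> ["http://news.sina.com.cn/...doc-i....shtml", ...]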
# Collect the article URLs from the first 2 pages
def getTotalNewUrlList():
    sub_url_total_array = []
    for i in range(1, 3):  # pages 1 and 2
        # extend adds each element of the returned list one by one;
        # append would add the whole list as a single element
        sub_url_total_array.extend(getPageDataList(i))
    return sub_url_total_array  # list of all article URLs
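# extend vs. append in one line each:
#   lst = [1]; lst.extend([2, 3])  # lst is now [1, 2, 3]
#   lst = [1]; lst.append([2, 3])  # lst is now [1, [2, 3]]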
# Fetch the details of every article
def getTotalNewsDetail():
    total_detail_list = []
    for sub_total_url in getTotalNewUrlList():  # all article URLs from the paginated list
        resultDic = getNewsDetail(sub_total_url)  # details dict for one article
        # crawl politely: delay between requests, and check the site's robots.txt
        # for whether crawling is allowed and at what rate
        time.sleep(1)
        for i in resultDic:
            print(resultDic[i])  # print each field of the article
        total_detail_list.append(resultDic)  # collect the dict
    return total_detail_list
# Store the data with sqlite; pandas gives a clean tabular view
def write_data(total_news):
    df = pandas.DataFrame(total_news)  # load the list of dicts into a DataFrame
    with sqlite3.connect('news.sqlite') as db:
        df.to_sql('news', con=db, if_exists='append')  # write the DataFrame to the "news" table
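# to_sql's if_exists accepts 'fail' (the default), 'replace', or 'append';
# with the default, a second run would raise because the "news" table exists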
# Read the data back
def read_data():
    with sqlite3.connect('news.sqlite') as db:
        df = pandas.read_sql_query('select * from news', con=db)  # read the table into a DataFrame
    print(df)
# Entry point
def main():
    total_news = getTotalNewsDetail()  # fetch all article details
    write_data(total_news)  # write to the database
    read_data()  # read back and print

if __name__ == '__main__':
    main()