I'm new to scraping, so just a quick note: the code below crawls Jianshu's 30-day trending list. The URL was captured from the browser's network traffic (packet capture), and since it ends with a page parameter, any page of the list can be fetched. I haven't learned how to wrap things into functions yet, so it's a bit rough; bear with me.
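If you want to sanity-check a captured URL before writing the full loop, a one-off request is enough. A minimal sketch (I've shortened the endpoint to just the page parameter here; the long seen_snote_ids query string from the capture is pasted into the real code below, and I haven't tested whether it is strictly required):

import requests

headers = {'User-Agent': 'Mozilla/5.0'}
test_url = 'http://www.reibang.com/trending/monthly?page=3'  # simplified endpoint for testing
resp = requests.get(test_url, headers=headers)
print(resp.status_code, len(resp.text))  # 200 with a non-trivial body means the capture works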
#!/usr/bin/env python
# -*- coding: utf-8 -*-
import csv
import time

import requests
from bs4 import BeautifulSoup
headers = {'User-Agent':"Mozilla/5.0 (Windows NT 6.1; WOW64) AppleWebKit/537.1 (KHTML, like Gecko) Chrome/22.0.1207.1 Safari/537.1"}
url = 'http://www.reibang.com/trending/monthly?seen_snote_ids%5B%5D=9417518&seen_snote_ids%5B%5D=9975670&seen_snote_ids%5B%5D=9983984&seen_snote_ids%5B%5D=9707970&seen_snote_ids%5B%5D=9650477&seen_snote_ids%5B%5D=10065620&seen_snote_ids%5B%5D=10239288&seen_snote_ids%5B%5D=9917498&seen_snote_ids%5B%5D=10066091&seen_snote_ids%5B%5D=10050042&seen_snote_ids%5B%5D=9417837&seen_snote_ids%5B%5D=10133511&seen_snote_ids%5B%5D=9587458&seen_snote_ids%5B%5D=10189199&seen_snote_ids%5B%5D=10443321&seen_snote_ids%5B%5D=10094112&seen_snote_ids%5B%5D=10270938&seen_snote_ids%5B%5D=9654829&seen_snote_ids%5B%5D=8446458&seen_snote_ids%5B%5D=10465818&page='
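# Note: instead of concatenating the page number onto this long string, requests
# can build the query string for you (a sketch; the seen_snote_ids values are omitted):
#   requests.get('http://www.reibang.com/trending/monthly',
#                params={'page': a}, headers=headers)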
for a in range(1, 101):  # crawl the first 100 pages
    new_url = url + str(a)
    page = requests.get(new_url, headers=headers)
    soup = BeautifulSoup(page.text, 'lxml')
    text = soup.find_all('div', {'class': 'content'})
    for i in text:
        authorName = i.find_all('a')[1].get_text().strip()     # author name
        pageTitle = i.find_all('a')[2].get_text().strip()      # title
        peopleRead = i.find_all('a')[3].get_text().strip()     # read count
        peopleComment = i.find_all('a')[4].get_text().strip()  # comment count
        peopleLike = i.find_all('span')[1].get_text().strip()  # like count
        pageAbstract = i.find('p', {'class': 'abstract'}).get_text().strip()  # abstract
        # reward count: it is the last <span>, and indexing it from the front
        # kept raising errors, so index from the end instead
        getMoney = i.find_all('span')[-1].get_text().strip()
        # one flat row per article: 作者, 標(biāo)題, 摘要, 閱讀量, 評論數(shù), 點(diǎn)贊數(shù), 贊賞數(shù)
        item = [authorName, pageTitle, pageAbstract, peopleRead,
                peopleComment, peopleLike, getMoney]
        # newline='' stops the extra blank line between rows on Windows;
        # utf_8_sig already writes the BOM that keeps Excel from mangling the text
        with open('簡書.csv', 'a+', newline='', encoding='utf_8_sig') as csvfile:
            spamwriter = csv.writer(csvfile)
            spamwriter.writerow(item)
    time.sleep(1)  # pause between pages to avoid getting the IP banned
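One small improvement I'd make next (a sketch only, reusing the column names I had commented out; not tested against the live site): write the header row once before crawling, then let the loop above keep appending data rows. Opening with mode 'w' here intentionally starts the file fresh:

import csv

item_name = ['作者', '標(biāo)題', '摘要', '閱讀量', '評論數(shù)', '點(diǎn)贊數(shù)', '贊賞數(shù)']

# create the file and write the header once; the scraping loop then
# appends one row per article with open(..., 'a+') as before
with open('簡書.csv', 'w', newline='', encoding='utf_8_sig') as csvfile:
    csv.writer(csvfile).writerow(item_name)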