The problem this article tackles is how to give articles pushed on the campus network a title that is both "relatable" and "popular".
Word frequency reflects how relatable the wording is, while cluster analysis picks out the titles that are actually popular.
The approach: use Python to crawl the titles, read counts, and reply counts of the articles in one section of the campus network, segment all of the titles with jieba, and finally run a cluster analysis on the read and reply counts to get the result. For comparison, the titles of every article by the popular WeChat account Mimeng (咪蒙) are crawled as well.
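The segmentation step itself belongs to part 2, but as a quick illustration of what jieba does with a title, here is a minimal sketch; the sample title is invented and it assumes jieba is installed:

import jieba

# an invented sample title, purely for illustration
sample_title = '期末复习资料大汇总'
words = jieba.lcut(sample_title)  # split the title into a list of words
print(words)

Counting how often each word appears across all titles is what later drives the word clouds in part 2.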
The write-up is split into three parts:
1 Use Python to crawl the campus network data and all of Mimeng's article titles on chuansong.me, and save them locally
2 Draw word clouds for both
3 Run a cluster analysis on the campus network titles
This post covers the first part.
Part 1: Crawl the campus network data with Python and save it locally
# Crawl the titles, read counts, and reply counts of every article in the "文章天地" section of Ruisi (睿思)
from bs4 import BeautifulSoup
import requests
import csv
import time
import codecs
# Request headers
headers = {  # adding a User-Agent makes the script look like a regular browser
    'Connection': 'keep-alive',
    'Cache-Control': 'max-age=0',
    'Upgrade-Insecure-Requests': '1',
    'User-Agent': 'Mozilla/5.0 (Windows NT 6.1; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/49.0.2623.87 Safari/537.36',
    'Accept': 'text/html,application/xhtml+xml,application/xml;q=0.9,image/webp,*/*;q=0.8',
    'Accept-Encoding': 'gzip, deflate, sdch',
    'Accept-Language': 'zh-CN,zh;q=0.8',
}  # note: the original "Host": "www.kuaidaili.com" entry is dropped; it belongs to a different site and requests fills in Host automatically
data = []
# Every page of the Ruisi "文章天地" section, from the first to the last
urls = ['http://rs.xidian.edu.cn/forum.php?mod=forumdisplay&fid=549&page={}'.format(str(i)) for i in range(1, 15)]
# Given a page URL, collect the title, read count, and reply count of every article on that page
def get_attractions(url):
    wb_data = requests.get(url, headers=headers)
    soup = BeautifulSoup(wb_data.text, 'lxml')
    # pause between requests to avoid hammering the server
    time.sleep(2)
    # pick out the titles, read counts, and reply counts
    titles = soup.select('tbody > tr > th > a.s.xst')
    nums_readers = soup.select('tbody > tr > td.num > em')
    nums_backs = soup.select('tbody > tr > td.num > a')
    for title, num_read, num_back in zip(titles, nums_readers, nums_backs):
        info = {
            'title': title.get_text(),
            'num_read': num_read.get_text(),
            'num_back': num_back.get_text()
        }
        data.append(info)  # store each article as a dict and append it to data
# Crawl every page
for single_url in urls:
    get_attractions(single_url)
# Save the results to a local CSV file (utf_8_sig writes a BOM so Excel opens it correctly)
with codecs.open('/Users/zhangyi/Desktop/2.csv', 'w+', 'utf_8_sig') as csv_file:
    fieldnames = [k for k in data[0]]
    writer = csv.DictWriter(csv_file, fieldnames=fieldnames)
    writer.writeheader()
    for dictionary in data:
        writer.writerow(dictionary)
Article titles, read counts, and reply counts
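Before moving on to Mimeng, a minimal sketch of how the saved CSV can be sanity-checked, assuming pandas is installed; the crawler stores the read and reply counts as text, so they are coerced to numbers here, which is what the cluster analysis in part 3 will need:

import pandas as pd

# load the CSV written above (utf_8_sig keeps the BOM, so both Excel and pandas read it cleanly)
df = pd.read_csv('/Users/zhangyi/Desktop/2.csv', encoding='utf_8_sig')

# num_read and num_back were scraped as text; coerce them to numbers
df['num_read'] = pd.to_numeric(df['num_read'], errors='coerce')
df['num_back'] = pd.to_numeric(df['num_back'], errors='coerce')

print(df.shape)   # how many articles were collected
print(df.head())  # a quick look at the first few rows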
Crawl all of Mimeng's article titles from chuansong.me with Python and save them locally
# Crawl the titles of Mimeng's (咪蒙) articles
from bs4 import BeautifulSoup
import requests
import csv
import time
import codecs
# Request headers
headers = {  # adding a User-Agent makes the script look like a regular browser
    'Connection': 'keep-alive',
    'Cache-Control': 'max-age=0',
    'Upgrade-Insecure-Requests': '1',
    'User-Agent': 'Mozilla/5.0 (Windows NT 6.1; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/49.0.2623.87 Safari/537.36',
    'Accept': 'text/html,application/xhtml+xml,application/xml;q=0.9,image/webp,*/*;q=0.8',
    'Accept-Encoding': 'gzip, deflate, sdch',
    'Accept-Language': 'zh-CN,zh;q=0.8',
}
data = []
# Every listing page of Mimeng's account on chuansong.me: 12 articles per page, so start = 0, 12, ..., 612
urls_mm = ['http://chuansong.me/account/mimeng7?start={}'.format(str(i)) for i in range(0, 624, 12)]
# Given a listing-page URL, collect the title, post time, and article URL of every article on that page
def get_attractions(url):
    wb_data = requests.get(url, headers=headers)
    soup = BeautifulSoup(wb_data.text, 'lxml')
    time.sleep(2)
    titles = soup.select('a.question_link')
    time_post = soup.select('span.timestamp')
    # build the full article URLs from the hrefs on this page
    # (kept local to the page so titles and URLs stay aligned; accumulating them globally, as before, misaligns the zip below)
    urls = ['http://chuansong.me{}'.format(link.get('href')) for link in titles]
    for title, time_start, url_s in zip(titles, time_post, urls):
        info = {
            'title': title.get_text(),
            'time_start': time_start.get_text(),
            'url_s': url_s
        }
        data.append(info)
    return data
# Crawl every listing page
for single_url in urls_mm:
    get_attractions(single_url)
# Save the results to a local CSV file
with codecs.open('/Users/zhangyi/Desktop/csvs/mm.csv', 'w+', 'utf_8_sig') as csv_file:
    heads = [k for k in data[0]]
    writer = csv.DictWriter(csv_file, fieldnames=heads)
    writer.writeheader()
    for dictionary in data:
        writer.writerow(dictionary)
Mimeng article titles
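As a similar sanity check on the Mimeng file, a minimal sketch that drops any rows collected twice and counts the distinct titles, again assuming pandas is installed:

import pandas as pd

# load the Mimeng titles written above
df_mm = pd.read_csv('/Users/zhangyi/Desktop/csvs/mm.csv', encoding='utf_8_sig')

# drop any articles that happened to be collected twice, keyed on the article URL
df_mm = df_mm.drop_duplicates(subset='url_s')

print(len(df_mm))             # number of distinct articles
print(df_mm['title'].head())  # first few titles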