基于以往的經(jīng)驗(yàn),用python將教育部官網(wǎng)的1997-2019年的教育統(tǒng)計(jì)數(shù)據(jù)爬下來,保存到excel里。
數(shù)據(jù)來源:教育部官網(wǎng)【moe.gov.cn/】-文獻(xiàn)-教育統(tǒng)計(jì)數(shù)據(jù)
舉例來說:
這次相較于之前,就多用了個(gè)函數(shù)pandas.read_html,將網(wǎng)頁表格轉(zhuǎn)成數(shù)據(jù)框,進(jìn)而導(dǎo)出excel。
按照官網(wǎng)的層級建立文件夾,結(jié)果示例:
過程呢,基本上也就是先分析網(wǎng)頁結(jié)構(gòu),所有年份中,只有2010-2012年這3年沒有分類,其他的都有2級文件夾。
因?yàn)楹ε屡廊√l繁了,就設(shè)立了隨機(jī)睡眠時(shí)間筐眷,時(shí)間還挺長的黎烈,在爬取整個(gè)數(shù)據(jù)的過程中就可以去干其他的事情了。
具體代碼:
import pandas as pd
from urllib import request
import time,random,re,os
import urllib.request
from lxml import etree
# 隨機(jī)獲取headers
def getheaders():
    """Return a request-header dict with a randomly chosen User-Agent.

    Rotating the User-Agent across requests makes the crawler look like
    different browsers and lowers the chance of being blocked.

    Returns:
        dict: ``{'User-Agent': <ua string>}`` suitable for urllib Request.
    """
    # BUG FIX: the original list was missing a comma after the first entry,
    # so the first two user-agent strings were implicitly concatenated into
    # one malformed value. Backslash continuations are also unnecessary
    # inside brackets and have been removed.
    user_agent_list = [
        "Mozilla/5.0 (Windows NT 6.1; WOW64) AppleWebKit/537.1 (KHTML, like Gecko) Chrome/22.0.1207.1 Safari/537.1",
        "Mozilla/5.0 (X11; CrOS i686 2268.111.0) AppleWebKit/536.11 (KHTML, like Gecko) Chrome/20.0.1132.57 Safari/536.11",
        "Mozilla/5.0 (Windows NT 6.1; WOW64) AppleWebKit/536.6 (KHTML, like Gecko) Chrome/20.0.1092.0 Safari/536.6",
        "Mozilla/5.0 (Windows NT 6.2) AppleWebKit/536.6 (KHTML, like Gecko) Chrome/20.0.1090.0 Safari/536.6",
        "Mozilla/5.0 (Windows NT 6.2; WOW64) AppleWebKit/537.1 (KHTML, like Gecko) Chrome/19.77.34.5 Safari/537.1",
        "Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/536.5 (KHTML, like Gecko) Chrome/19.0.1084.9 Safari/536.5",
        "Mozilla/5.0 (Windows NT 6.0) AppleWebKit/536.5 (KHTML, like Gecko) Chrome/19.0.1084.36 Safari/536.5",
        "Mozilla/5.0 (Windows NT 6.1; WOW64) AppleWebKit/536.3 (KHTML, like Gecko) Chrome/19.0.1063.0 Safari/536.3",
        "Mozilla/5.0 (Windows NT 5.1) AppleWebKit/536.3 (KHTML, like Gecko) Chrome/19.0.1063.0 Safari/536.3",
        "Mozilla/5.0 (Macintosh; Intel Mac OS X 10_8_0) AppleWebKit/536.3 (KHTML, like Gecko) Chrome/19.0.1063.0 Safari/536.3",
        "Mozilla/5.0 (Windows NT 6.2) AppleWebKit/536.3 (KHTML, like Gecko) Chrome/19.0.1062.0 Safari/536.3",
        "Mozilla/5.0 (Windows NT 6.1; WOW64) AppleWebKit/536.3 (KHTML, like Gecko) Chrome/19.0.1062.0 Safari/536.3",
        "Mozilla/5.0 (Windows NT 6.2) AppleWebKit/536.3 (KHTML, like Gecko) Chrome/19.0.1061.1 Safari/536.3",
        "Mozilla/5.0 (Windows NT 6.1; WOW64) AppleWebKit/536.3 (KHTML, like Gecko) Chrome/19.0.1061.1 Safari/536.3",
        "Mozilla/5.0 (Windows NT 6.1) AppleWebKit/536.3 (KHTML, like Gecko) Chrome/19.0.1061.1 Safari/536.3",
        "Mozilla/5.0 (Windows NT 6.2) AppleWebKit/536.3 (KHTML, like Gecko) Chrome/19.0.1061.0 Safari/536.3",
        "Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/535.24 (KHTML, like Gecko) Chrome/19.0.1055.1 Safari/535.24",
        "Mozilla/5.0 (Windows NT 6.2; WOW64) AppleWebKit/535.24 (KHTML, like Gecko) Chrome/19.0.1055.1 Safari/535.24",
        "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/80.0.3987.106 Safari/537.36",
    ]
    return {'User-Agent': random.choice(user_agent_list)}
# 獲取頁面html
def get_page(url):
    """Fetch *url* with a random User-Agent and return the body as text.

    Args:
        url (str): Absolute URL to download.

    Returns:
        str: Response body decoded as UTF-8.

    Raises:
        urllib.error.URLError: On network failure or HTTP error.
    """
    req = urllib.request.Request(url=url, headers=getheaders())
    # BUG FIX: the original never closed the HTTP response; the context
    # manager guarantees the connection is released even if decode fails.
    with urllib.request.urlopen(req) as resp:
        html = resp.read().decode('utf_8')
    # Random 0-3 s pause between requests to avoid hammering the server.
    time.sleep(random.random() * 3)
    return html
# 獲取每年鏈接及標(biāo)題
def get_every_year_title_url_ls():
    """Collect ``[title, url]`` pairs for every yearly statistics index page.

    Scrapes the 2019 landing page, whose sidebar lists links for all years.
    Relative links (containing ``../``) are resolved against the section
    root; the current year's entry links back to the landing page itself.
    """
    url = 'http://www.moe.gov.cn/s78/A03/moe_560/jytjsj_2019/'
    selector = etree.HTML(get_page(url))
    year_nodes = selector.xpath('/html/body/div[1]/div/div[5]/div[1]/ul/li')
    pairs = []
    for node in year_nodes:
        title = node.xpath('a/text()')[0]
        href = node.xpath('a/@href')[0]
        if '../' in href:
            target = 'http://www.moe.gov.cn/s78/A03/moe_560/' + href.replace('./', '').replace('.', '')
        else:
            target = url
        pairs.append([title, target])
    return pairs
# 獲取當(dāng)年的分類鏈接及標(biāo)題[2010-2012年這3年沒有分類鏈接]
def get_category_title_url_ls(url):
    """Return ``[title, absolute_url]`` pairs for the category links under *url*.

    Used for year pages that have a category level (all years except
    2010-2012, per the site structure).
    """
    selector = etree.HTML(get_page(url))
    pairs = []
    for node in selector.xpath('//*[@id="list"]/li'):
        title = node.xpath('a/text()')[0]
        # Hrefs are relative (``./...``); resolve against the year page URL.
        relative = node.xpath('a/@href')[0].replace('./', '')
        pairs.append([title, url + relative])
    return pairs
# 獲取每個(gè)分類鏈接的頁面數(shù)量
def get_page_num(url):
    """Return how many listing pages the category at *url* spans.

    The page embeds ``var recordCount = N;`` in inline JavaScript; the site
    paginates at 20 items per page.
    """
    html = get_page(url)
    item_num = int(re.findall(r'var recordCount = (.+?);', html)[0])
    print('共' + str(item_num) + '條信息')
    if item_num < 20:
        return 1
    if item_num % 20:
        return item_num // 20 + 1
    return item_num // 20
# 獲取每個(gè)分類鏈接下每頁頁面的鏈接及標(biāo)題
def get_page_url_ls(url):
    """List the URL of every listing page for the category at *url*.

    The first page is the index itself; subsequent pages follow the site's
    ``index_1.html`` … ``index_{N-1}.html`` naming scheme.
    """
    total = get_page_num(url)
    return [url] + [url + 'index_' + str(i) + '.html' for i in range(1, total)]
# 獲取每頁內(nèi)的所有鏈接及標(biāo)題
def get_item_title_url_ls(url):
    """Return ``[title, absolute_url]`` pairs for every data item on *url*.

    *url* is either a category index page or one of its ``index_N.html``
    pagination pages; in the latter case relative hrefs must be resolved
    against the parent directory rather than the page URL itself.
    """
    selector = etree.HTML(get_page(url))
    # Base directory for resolving relative links (loop-invariant, hoisted).
    if 'index_' in url:
        base = '/'.join(url.split('/')[:-1]) + '/'
    else:
        base = url
    pairs = []
    for node in selector.xpath('//*[@id="list"]/li'):
        title = node.xpath('a/text()')[0]
        relative = node.xpath('a/@href')[0].replace('./', '')
        pairs.append([title, base + relative])
    return pairs
# 獲得每個(gè)item的df
def get_df(url):
    """Parse the first HTML table on *url* into a DataFrame.

    The pages carry 6 boilerplate rows above the header and 6 below the
    data, which are skipped and dropped respectively.
    """
    page = get_page(url)
    frame = pd.read_html(page, skiprows=6, header=0)[0]
    return frame.drop(frame.tail(6).index)
# 將df導(dǎo)入excel
def to_excel(excel_path, title_url):
    """Download the table behind *title_url* and save it as an .xlsx file.

    Args:
        excel_path (str): Target directory, with trailing separator;
            created (including parents) if missing.
        title_url (list): ``[title, url]`` pair. Whitespace noise
            (CR/LF/tab/ideographic space) is stripped from the title so it
            can serve as a file name.
    """
    title = title_url[0].replace('\r', '').replace('\n', '').replace('\u3000', '').replace('\t', '')
    df = get_df(title_url[1])
    # FIX: exist_ok=True replaces the check-then-create pattern
    # (os.path.exists + makedirs), which is race-prone.
    os.makedirs(excel_path, exist_ok=True)
    # FIX: index=False instead of the deprecated integer 0.
    df.to_excel(excel_path + title + '.xlsx', index=False)
if __name__ == "__main__":
    # Root directory for the exported spreadsheets; subfolders mirror the
    # site's year / category / sub-category hierarchy.
    base_dir = 'D:\\2_study\\4_實(shí)戰(zhàn)\\python\\jyb_sta\\data\\'
    every_year_title_url_ls = get_every_year_title_url_ls()
    for n, i in enumerate(every_year_title_url_ls):
        year = 2019 - n  # year links are listed newest-first from 2019
        print('【文件夾】', str(year) + '年')
        fold1 = i[0]
        # 2010-2012 have no category level: items hang directly off the year page.
        if year in (2010, 2011, 2012):
            # FIX: the original reused ``n`` as this loop's variable,
            # shadowing the enumerate index above; renamed for clarity.
            for page_url in get_page_url_ls(i[1]):
                for l in get_item_title_url_ls(page_url):
                    print(l)
                    to_excel(base_dir + fold1 + '\\', l)
        else:
            for j in get_category_title_url_ls(i[1]):
                print('【文件夾】', j)
                fold2 = j[0]
                # In 2004, every category except "附表" nests one level deeper.
                if year == 2004 and '附表' != j[0]:
                    for z in get_category_title_url_ls(j[1]):
                        print('【文件夾】', z)
                        fold3 = z[0]
                        for m in get_page_url_ls(z[1]):
                            for k in get_item_title_url_ls(m):
                                print(k)
                                to_excel(base_dir + fold1 + '\\' + fold2 + '\\' + fold3 + '\\', k)
                else:
                    for m in get_page_url_ls(j[1]):
                        for k in get_item_title_url_ls(m):
                            print(k)
                            to_excel(base_dir + fold1 + '\\' + fold2 + '\\', k)
當(dāng)然,爬取下來的數(shù)據(jù)還需要根據(jù)具體需要進(jìn)行整理,比如,把某類數(shù)據(jù)按照年份合并起來,這個(gè)當(dāng)然也可以用python來執(zhí)行啦~
GZ號:amazingdata (數(shù)據(jù)格子鋪)
后臺回復(fù):教育統(tǒng)計(jì)數(shù)據(jù)烈炭,可下載所有的excel數(shù)據(jù)