xpath: finds information in HTML; traverses elements in an XML document and extracts attributes
xml: designed to transport data; its structure is very similar to HTML; it is a markup language
Common XPath syntax:
nodename: selects all child nodes of this node
/: selects starting from the root node
//: matches nodes anywhere in the document, regardless of position
. : selects the current node
..: selects the parent of the current node
@: selects a tag attribute; a/@href selects the href attribute of an <a> tag
a/text(): selects the text of an <a> tag
a[@class='123']: finds tags by a given attribute (here class), e.g. a[@id='123']
a[@id='123'][last()]: selects the last <a> tag with id '123'
a[@id='123'][position()<3]: selects the first two <a> tags with id '123'
li[@id="123"][position()=2]: selects the second <li> tag with id "123"
"""
import os
import re

import requests
from lxml import etree
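
# Illustrative sketch (not part of the crawler and never called by it): the XPath rules
# from the module docstring exercised against a small inline HTML snippet. The snippet
# and this helper's name are assumptions added for demonstration only.
def _xpath_syntax_demo():
    demo_html = """
    <div class="list">
        <a class="item" href="/first">first</a>
        <a class="item" href="/second">second</a>
        <a class="item" href="/third">third</a>
    </div>
    """
    root = etree.HTML(demo_html)
    # @ extracts an attribute, text() extracts the node text
    print(root.xpath('//a[@class="item"]/@href'))    # ['/first', '/second', '/third']
    print(root.xpath('//a[@class="item"]/text()'))   # ['first', 'second', 'third']
    # Predicates: last() keeps the final match, position()<3 keeps the first two
    print(root.xpath('//a[@class="item"][last()]/text()'))        # ['third']
    print(root.xpath('//a[@class="item"][position()<3]/text()'))  # ['first', 'second']
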
# Example:
# http://www.budejie.com/audio/
# http://www.budejie.com/audio/2
def load_page_data(url):
    """
    Downloader: fetch the page source for a paginated URL, parse it, and follow the next page.
    :param url: URL of the audio list page to download
    :return:
    """
    # proxies = {
    #     'http': '59.37.33.62:50686',
    #     'https': '61.128.208.94:3128',
    #     'https': 'http://username:password@ip:port',  # authenticated proxy
    # }
    req_headers = {
        'User-Agent': 'Mozilla/5.0 (Windows NT 6.1; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/70.0.3538.110 Safari/537.36'
    }
    response = requests.get(url, headers=req_headers)
    response.encoding = 'utf-8'
    if response.status_code == 200:
        print('Request succeeded')
        status = parse_page_data(response.text)
        # with open('page.html', 'w', encoding='utf-8') as file:
        #     file.write(response.text)
        if status:
            # Request the next page
            pattern = re.compile(r'\d+')
            # Current page number
            cur_page = re.search(pattern, response.url).group()
            # Next page number
            next_page = int(cur_page) + 1
            # Next page URL
            next_page_url = re.sub(pattern, str(next_page), response.url)
            load_page_data(next_page_url)
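
# Worked example of the pagination step above (values assumed for illustration):
# re.sub replaces every run of digits in the URL with the incremented page number;
# on these URLs only the trailing page number contains digits, so only it changes.
#   >>> re.sub(re.compile(r'\d+'), str(1 + 1), 'http://www.budejie.com/audio/1')
#   'http://www.budejie.com/audio/2'
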
def parse_page_data(html):
    """
    Extract data from the page source with XPath.
    :param html: page source as a string
    :return: True if the page contained list items, otherwise False
    """
    # Build an element tree with etree
    html_element = etree.HTML(html)
    auto_list = html_element.xpath('//div[@class="j-r-c"]/div[@class="j-r-list"]/ul/li')
    # print(auto_list)
    # print(type(auto_list))
    for auto in auto_list:
        auto_data = {}
        # Author name
        auto_data['name'] = auto.xpath('.//a[@class="u-user-name"]/text()')[0]
        # Post content
        auto_data['content'] = auto.xpath('.//div[@class="j-r-list-c-desc"]/text()')[0]
        # Publish time
        auto_data['publishTime'] = auto.xpath('.//span[@class="u-time f-ib f-fr"]/text()')[0]
        # Like count
        auto_data['zanNum'] = auto.xpath('.//li[@class="j-r-list-tool-l-up"]/span/text()')[0]
        # Dislike count (the trailing space in the class name matches the page markup)
        low_num = auto.xpath('.//li[@class="j-r-list-tool-l-down "]/span/text()')
        auto_data['lowNum'] = low_num[0] if low_num else ''
        # Audio URL (the leading space in the class name matches the page markup)
        auto_data['url'] = auto.xpath('.//div[@class=" j-audio"]/@data-mp3')[0]
        print(auto_data)
        download_audio_by_url(auto_data['url'], auto_data)
    return len(auto_list) > 0
def download_audio_by_url(url, auto):
    """
    Download the audio file at url and record its local path on the item dict.
    """
    # proxies = {
    #     'http': '59.37.33.62:50686',
    #     'https': '61.128.208.94:3128',
    #     'https': 'http://username:password@ip:port',  # authenticated proxy
    # }
    req_header = {
        'User-Agent': 'Mozilla/5.0 (Windows NT 6.1; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/70.0.3538.110 Safari/537.36'
    }
    response = requests.get(url, headers=req_header)
    if response.status_code == 200:
        # print(response.url, 'downloaded')
        # Use the tail of the URL as the file name
        filename = response.url[-17:]
        os.makedirs('baisibudejie', exist_ok=True)
        with open('baisibudejie/' + filename, 'wb') as file:
            file.write(response.content)
        auto['localpath'] = 'baisibudejie/' + filename
        print('Done')
        # Store the record in the database
        save_data_to_db(auto)
def save_data_to_db(audio):
    # Placeholder: persisting to a database is not implemented yet; just print the record.
    print(audio)
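
# Minimal persistence sketch, assuming a local SQLite file; save_data_to_db above only
# prints the record. The function name, table name, and column list below are hypothetical
# additions for illustration, not part of the original crawler, and nothing calls this.
def _save_audio_to_sqlite(audio, db_path='budejie.db'):
    import sqlite3
    conn = sqlite3.connect(db_path)
    conn.execute(
        'CREATE TABLE IF NOT EXISTS budejie_audio ('
        'name TEXT, content TEXT, publishTime TEXT, '
        'zanNum TEXT, lowNum TEXT, url TEXT, localpath TEXT)'
    )
    conn.execute(
        'INSERT INTO budejie_audio VALUES (?, ?, ?, ?, ?, ?, ?)',
        (audio.get('name'), audio.get('content'), audio.get('publishTime'),
         audio.get('zanNum'), audio.get('lowNum'), audio.get('url'),
         audio.get('localpath')),
    )
    conn.commit()
    conn.close()
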
if __name__ == "__main__":
    start_url = 'http://www.budejie.com/audio/1'
    load_page_data(start_url)