#-*-coding:utf-8-*-
import requests
from bs4 import BeautifulSoup
import csv
# Site root; prepended to the relative hrefs found on listing pages.
shine_cinema = 'http://www.ygdy8.com'
user_agent = 'Mozilla/5.0 (Windows NT 10.0; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/59.0.3071.115 Safari/537.36'
# Without a User-Agent header the site starts blocking the crawler around page 60.
headers = {'User-Agent': user_agent}
# Listing-page URL template; page number + '.html' is appended when iterating.
base_url = 'http://www.ygdy8.com/html/gndy/china/list_4_'
# The first listing page has a special URL, so it is crawled separately.
entry_url = 'http://www.ygdy8.com/html/gndy/china/index.html'
# Fetch a web page
def download(url, timeout=10):
    """Fetch `url` and return the requests.Response.

    :param url: absolute URL to fetch.
    :param timeout: seconds before the request is aborted (new, defaulted —
        the original had none, which can hang forever on a stalled server).
    :return: requests.Response with encoding forced to gb2312.
    """
    r = requests.get(url, headers=headers, timeout=timeout)
    # The site serves gb2312-encoded pages; forcing the encoding avoids
    # mojibake when reading r.text.
    r.encoding = 'gb2312'
    return r
# Get a movie's download address
def get_download_url(url):
    """Fetch a movie detail page and return its first download link.

    :param url: absolute URL of the movie detail page.
    :return: the href of the first 'table tr > td > a' anchor on the page.
    :raises ValueError: when the page contains no such link (the original
        crashed with an opaque IndexError in that case).
    """
    r = download(url)
    soup = BeautifulSoup(r.text, 'lxml')
    link = soup.select_one('table tr > td > a')
    if link is None:
        raise ValueError('no download link found on page: ' + url)
    return link['href']
# Get movie-related information
def get_ciname_url(url):
    """Print title, page URL and download URL for every movie on a listing page.

    NOTE(review): despite the name, `url` is the HTML *text* of a listing
    page, not a URL — the parameter name is kept for backward compatibility.
    """
    soup = BeautifulSoup(url, 'lxml')
    # Each movie entry is the second anchor inside a <b> tag.
    for link in soup.select('b > a:nth-of-type(2)'):
        download_url = get_download_url(shine_cinema + link['href'])
        print(link.text, '地址:' + shine_cinema + link['href'], '下載地址:' + download_url)
def iter_url(start=2, end=94):
    """Crawl listing pages [start, end) and print their movies.

    :param start: first page number to crawl (new parameter; default 2
        matches the original hard-coded range — page 1 uses a special
        URL and is crawled separately via `entry_url`).
    :param end: one past the last page number (default 94, as before).
    """
    for page_no in range(start, end):
        r = download(base_url + str(page_no) + '.html')
        print("這是第{}頁".format(page_no))
        get_ciname_url(r.text)
if __name__ == '__main__':
    # Crawl the specially-named first listing page, then pages 2-93.
    # (Guard added so importing this module no longer kicks off a crawl;
    # a stray non-Python line, `1.png`, was removed — it was a SyntaxError
    # that prevented the file from running at all.)
    index = download(entry_url)
    get_ciname_url(index.text)
    iter_url()