# Crawl mzitu.com image galleries using selenium + PhantomJS.
import requests
import time
import os
from selenium import webdriver
class Spider(object):
    """Scrape image galleries from mzitu.com with Selenium + PhantomJS.

    Workflow: listing pages -> gallery links -> per-gallery page count ->
    per-picture download via ``requests`` (the image host checks headers).
    """

    def __init__(self):
        # Headless browser used for all page navigation.
        # NOTE(review): PhantomJS support was removed from modern Selenium;
        # this assumes the legacy Selenium 3 API (find_elements_by_* etc.).
        self.driver = webdriver.PhantomJS()

    def Get_pages(self, maxpage):
        """Crawl listing pages 1..maxpage, scraping every gallery found.

        :param maxpage: number of listing pages to crawl (inclusive).
        """
        for page in range(1, maxpage + 1):
            url = "http://www.mzitu.com/page/" + str(page)
            self.Get_content_page(url)

    def Get_content_page(self, url):
        """Collect unique gallery links from one listing page and scrape each."""
        self.driver.get(url)
        pins = self.driver.find_elements_by_id('pins')
        anchors = pins[0].find_elements_by_tag_name('a')
        links = []
        for anchor in anchors:
            # Hoisted: the original fetched the attribute twice per anchor.
            href = anchor.get_attribute('href')
            # Each gallery appears twice in the page markup; de-duplicate.
            if href not in links:
                links.append(href)
        for link in links:
            self.Get_picture_page(link)

    def Get_picture_page(self, url):
        """Determine a gallery's picture count and download every picture."""
        self.driver.get(url)
        title = self.driver.find_element_by_tag_name('h2').text  # gallery title
        # BUG FIX: os.mkdir raised FileExistsError on a re-run when the
        # folder already existed; makedirs(..., exist_ok=True) is idempotent.
        os.makedirs(title, exist_ok=True)
        # The pagination bar holds the gallery's page numbers.
        pages_site = self.driver.find_element_by_class_name('pagenavi')
        labels = [a.text for a in pages_site.find_elements_by_tag_name('a')]
        # The highest page number sits in the second-to-last navigation slot.
        # Robustness: skip the gallery instead of raising on odd markup.
        if len(labels) < 2 or not labels[-2].isdigit():
            return
        picture_max_number = int(labels[-2]) + 1
        for i in range(1, picture_max_number):
            picture_link = url + '/' + str(i)
            self.Download_picture(picture_link, title, i)
            print('Done one')

    def Download_picture(self, link, filename, picture_number):
        """Download one picture into folder *filename* as <picture_number>.jpg.

        :param link: picture page URL (rendered via the browser to find the img).
        :param filename: gallery folder name (created by Get_picture_page).
        :param picture_number: 1-based index used as the output file name.
        """
        headers = {  # request headers for the image host (hotlink protection)
            'Accept': 'text/html,application/xhtml+xml,application/xml;q=0.9,image/webp,*/*;q=0.8',
            'Accept-Encoding': 'gzip, deflate, sdch',
            'Accept-Language': 'zh-CN,zh;q=0.8',
            'Cache-Control': 'max-age=0',
            'Connection': 'keep-alive',
            'Host': 'i.meizitu.net',
            'If-Modified-Since': 'Thu, 02 Mar 2017 14:12:51 GMT',
            'If-None-Match': "58b82863-28887",
            'Upgrade-Insecure-Requests': '1',
            'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/56.0.2924.87 Safari/537.36'
        }
        time.sleep(1)  # throttle requests to be polite to the server
        self.driver.get(link)
        # BUG FIX: os.path.join replaces the hand-built '\\'-joined path,
        # which only worked on Windows.
        path = os.path.join(os.getcwd(), filename, str(picture_number) + '.jpg')
        paragraph = self.driver.find_element_by_tag_name('p')
        img = paragraph.find_element_by_tag_name('img')
        picture_download_link = img.get_attribute('src')
        picture = requests.get(picture_download_link, headers=headers)
        if picture.status_code == 200:
            # BUG FIX: the original leaked the file handle
            # (open(path, 'wb').write(...)); 'with' closes it deterministically.
            with open(path, 'wb') as fh:
                fh.write(picture.content)
def main():
    """Script entry point: crawl the first three listing pages."""
    crawler = Spider()
    crawler.Get_pages(3)


if __name__ == '__main__':
    main()