While playing the game over the weekend I spotted quite a few good-looking skins and thought I'd download them to use as wallpapers. No sooner thought than done, so I got started right away.
It really comes down to two main steps:
- first, write a Python function that downloads an image given its URL;
- then, analyze the target page and write Selenium code to collect the download URLs in bulk.
Python code to download an image from a URL
```python
import urllib.request

def download_url_img(img_url, img_address):
    # Fetch the image bytes from the URL and write them to the given path
    response = urllib.request.urlopen(img_url)
    img = response.read()
    with open(img_address, 'wb') as f:
        f.write(img)
```
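For a quick sanity check, the helper can be called on its own. The URL and output filename below are placeholders for illustration, not taken from the wallpaper site:

```python
# Hypothetical example: the URL and filename are placeholders, swap in a real image URL.
download_url_img(
    'https://example.com/sample_wallpaper_1280x720.jpg',
    'sample_wallpaper.jpg',
)
```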
Use Selenium to locate the elements and collect the image URLs; the complete code is as follows
```python
import os
from time import sleep

from selenium import webdriver
from selenium.webdriver.common.by import By
from selenium.webdriver.support.wait import WebDriverWait
from selenium.webdriver.support import expected_conditions as EC

from Utils.down_save_url_img import download_url_img


def driver_wait(dr, xpath):
    # Wait up to 20 seconds (polling every 2) for the element to be present
    WebDriverWait(dr, 20, 2).until(EC.presence_of_element_located((By.XPATH, xpath)))


iterms = []
driver = webdriver.Chrome()
driver.get('https://pvp.qq.com/web201605/wallpaper.shtml###')

while True:
    sleep(2)  # give the current page of thumbnails time to render
    img_class_names = driver.find_element(By.CLASS_NAME, 'p_hd').find_elements(By.CLASS_NAME, 'p_newhero_item')
    for class_name in img_class_names:
        name = class_name.find_element(By.TAG_NAME, 'h4').find_element(By.TAG_NAME, 'a').text
        lis = class_name.find_element(By.TAG_NAME, 'ul').find_elements(By.TAG_NAME, 'li')  # all <li> under the <ul>
        img_url = lis[1].find_element(By.TAG_NAME, 'a').get_attribute("href")  # pick the <li> with the resolution you want
        iterm = name, img_url
        iterms.append(iterm)
    totalpage = driver.find_element(By.CLASS_NAME, 'totalpage').text
    if int(totalpage.split('/')[0]) != int(totalpage.split('/')[1]):  # not the last page yet
        # click "next page"
        driver.find_element(By.CLASS_NAME, 'downpage').click()
    else:
        break

driver.quit()
print(iterms)

address = "/Users/Pictures/wzry/"
os.chdir(address)  # switch to the save directory
for iterm in iterms:
    img_address = iterm[0] + '_1280*720.jpg'
    download_url_img(iterm[1], img_address)
```
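The script defines a `driver_wait` helper but only ever pauses with `sleep(2)`. If the fixed delay turns out to be flaky, an explicit wait could be dropped in at the top of the loop instead. A minimal sketch, assuming the wallpaper list container carries the `p_hd` class (the XPath is not verified against the live page):

```python
# Assumed XPath for the wallpaper list container; adjust to the actual page markup.
driver_wait(driver, "//*[contains(@class, 'p_hd')]")
```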