Development environment
IDE: PyCharm
Python version: 3.7
Basic steps for scraping Baidu Images
1. Fetch the Baidu search page
def read_html(urlStr):
    result = request.urlopen(urlStr)
    webResult = result.read()
    # save the raw response to a file for inspection
    with codecs.open('/Users/liliqiang/Desktop/imageFile/webResult.txt', 'w', 'utf-8') as file:
        file.write(webResult.decode('utf-8'))
    return webResult
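If the page fetch itself ever gets rejected, the same User-Agent trick described in the postscript below can also be applied at this step. A minimal sketch, assuming the search URL used later in this post and a shortened browser User-Agent:

from urllib import request

# sketch only: fetch the search page while already sending a browser User-Agent
urlStr = 'http://image.baidu.com/search/index?tn=baiduimage&ct=201326592&lm=-1&cl=2&ie=gb18030&fr=ala&ala=1&alatpl=others&pos=0&&word='
req = request.Request(urlStr, headers={'User-Agent': 'Mozilla/5.0'})
webResult = request.urlopen(req).read()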
2. Extract the image URLs
def read_image(webResult):
    re_img = re.compile(rb'"thumbURL":"(.*?\.jpg)",')
    # re_img = re.compile(rb'src="(.*?\.jpg)" ')
    imgs = re_img.findall(webResult)
    imgsData = []
    for img in imgs:
        imgsData.append(img.decode('utf-8'))
    # save the extracted URLs as JSON
    with open('/Users/liliqiang/Desktop/imageFile/images.txt', 'w') as file:
        json.dump(imgsData, file)
    return imgsData
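To sanity-check what the pattern captures, here is a tiny self-contained demo; the response fragment and the URL in it are made up purely for illustration:

import re

# hypothetical fragment of the Baidu response, just to show what findall returns
sample = b'..."thumbURL":"https://img.example.com/abc.jpg",...'
re_img = re.compile(rb'"thumbURL":"(.*?\.jpg)",')
print(re_img.findall(sample))  # [b'https://img.example.com/abc.jpg']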
3. Download the images
def save_image(imgs):
    # install an opener with a browser User-Agent so the downloads are not rejected
    opener = urllib.request.build_opener()
    opener.addheaders = [('User-Agent', 'Mozilla/5.0 (Linux; Android 6.0; Nexus 5 Build/MRA58N) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/75.0.3770.142 Mobile Safari/537.36')]
    urllib.request.install_opener(opener)
    count = 0
    for img in imgs:
        tail = img[-3:]
        filename = '/Users/liliqiang/Desktop/imageFile/%s.%s' % (count, tail)
        urllib.request.urlretrieve(img, filename)
        count += 1
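In practice some of the extracted URLs may be dead. A sketch of the same loop with a try/except so one failed download does not abort the rest (the opener setup and the imgs list from save_image are assumed to be in place):

import urllib.request

# sketch: skip images that fail to download instead of aborting the whole run
for count, img in enumerate(imgs):
    filename = '/Users/liliqiang/Desktop/imageFile/%s.%s' % (count, img[-3:])
    try:
        urllib.request.urlretrieve(img, filename)
    except Exception as e:
        print('skipping %s: %s' % (img, e))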
后記: 遇到的問(wèn)題
I. [SSL: CERTIFICATE_VERIFY_FAILED] certificate verify failed
Fix: disable SSL certificate verification
ssl._create_default_https_context = ssl._create_unverified_context
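Disabling verification globally works, but if you would rather limit it to a single call, an SSL context can be passed to urlopen instead. A minimal sketch, assuming urlStr is the search URL assembled in the script below:

import ssl
from urllib import request

# build a context that skips certificate verification and use it for one request only
ctx = ssl.create_default_context()
ctx.check_hostname = False
ctx.verify_mode = ssl.CERT_NONE
webResult = request.urlopen(urlStr, context=ctx).read()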
II. urlretrieve HTTP Error 403: Forbidden
Many sites block crawlers. We can work around this by pretending to be a browser, which means setting the User-Agent request header. If the User-Agent in the example below does not work for you, look up the request headers your own browser sends and substitute that value.
opener = urllib.request.build_opener()
opener.addheaders = [('User-Agent', 'Mozilla/5.0 (Windows NT 6.3; WOW64; rv:28.0) Gecko/20100101 Firefox/28.0')]
urllib.request.install_opener(opener)
urllib.request.urlretrieve(img, filename)
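An equivalent per-request approach, without installing a global opener, is to attach the header to a Request object and write the bytes yourself. A sketch, where img and filename are the loop variables from save_image:

from urllib import request

# attach the User-Agent to this one request instead of installing a global opener
req = request.Request(img, headers={'User-Agent': 'Mozilla/5.0 (Windows NT 6.3; WOW64; rv:28.0) Gecko/20100101 Firefox/28.0'})
with open(filename, 'wb') as f:
    f.write(request.urlopen(req).read())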
III. UnicodeEncodeError: 'ascii' codec can't encode characters in position 65-69: ordinal not in range(128)
This happens because the request URL contains non-ASCII characters (the search keyword), which urlopen cannot send as-is; the URL has to be pure ASCII. urllib.parse.quote percent-encodes the UTF-8 bytes of the keyword so the resulting URL contains only ASCII characters.
Fix:
urlStr = 'http://image.baidu.com/search/index?tn=baiduimage&ct=201326592&lm=-1&cl=2&ie=gb18030&fr=ala&ala=1&alatpl=others&pos=0&&word='
urlStr = urlStr + urllib.parse.quote('??')
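For reference, this is what quote does to a keyword; urlencode can build the whole query string the same way. '北京' (Beijing) is used here only as an example keyword, not the one from the original run:

import urllib.parse

# quote() percent-encodes the UTF-8 bytes; urlencode() builds key=value pairs the same way
print(urllib.parse.quote('北京'))                # %E5%8C%97%E4%BA%AC
print(urllib.parse.urlencode({'word': '北京'}))  # word=%E5%8C%97%E4%BA%AC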
最后附錄整個(gè)文件內(nèi)容:
#!/usr/bin/python
# -*- coding: UTF-8 -*-
from urllib import request
import urllib
import re
import ssl
import json
import sys
# disable SSL certificate verification
ssl._create_default_https_context = ssl._create_unverified_context
def read_html(urlStr):
    result = request.urlopen(urlStr)
    webResult = result.read()
    # save the raw response to a file for inspection
    with open('/Users/<#your username#>/Desktop/imageFile/webResult.txt', 'w') as file:
        file.write(webResult.decode('utf-8'))
    return webResult
def read_image(webResult):
    re_img = re.compile(rb'"thumbURL":"(.*?\.jpg)",')
    # re_img = re.compile(rb'src="(.*?\.jpg)" ')
    imgs = re_img.findall(webResult)
    imgsData = []
    for img in imgs:
        imgsData.append(img.decode('utf-8'))
    # save the extracted URLs as JSON
    with open('/Users/<#your username#>/Desktop/imageFile/images.txt', 'w') as file:
        json.dump(imgsData, file)
    return imgsData
def save_image(imgs):
    # install an opener with a browser User-Agent so the downloads are not rejected
    opener = urllib.request.build_opener()
    opener.addheaders = [('User-Agent', 'Mozilla/5.0 (Windows NT 6.3; WOW64; rv:28.0) Gecko/20100101 Firefox/28.0')]
    urllib.request.install_opener(opener)
    count = 0
    for img in imgs:
        tail = img[-3:]
        filename = '/Users/<#your username#>/Desktop/imageFile/%s.%s' % (count, tail)
        urllib.request.urlretrieve(img, filename)
        count += 1
urlStr = 'http://image.baidu.com/search/index?tn=baiduimage&ct=201326592&lm=-1&cl=2&ie=gb18030&fr=ala&ala=1&alatpl=others&pos=0&&word='
urlStr = urlStr + urllib.parse.quote('??')
print('Fetching page data...')
webRes = read_html(urlStr)
print('Extracting image URLs...')
imgs = read_image(webRes)
print('Downloading images...')
save_image(imgs)
print('Image download finished!!')
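One last practical note: the script writes into a fixed folder, so that folder has to exist before the first write or open() will raise FileNotFoundError. A sketch of creating it up front, using the same placeholder path as in the script:

import os

# create the output folder if it is not there yet
os.makedirs('/Users/<#your username#>/Desktop/imageFile', exist_ok=True)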