# Note from my own testing: well-known sites ban essentially every IP listed on
# the Xici proxy site (a few may slip through). If you need to scrape major
# sites at scale, use a different source of working proxy IPs.
import re
import requests
from bs4 import BeautifulSoup
import time
import random
# Probe whether a proxy IP is usable.
# ip       : proxy address in the form '0.0.0.0:8000'
# test_url : the target site to test against (the site you intend to scrape)
# time_out : request timeout in seconds
def test_ip(ip, test_url='https://movie.douban.com/', time_out=0.3):
    """Try *ip* as an HTTPS proxy against *test_url*, up to 3 attempts.

    On the first HTTP 200 response the ip is appended to the module-level
    list ``all`` and probing stops.  Results are reported via print().
    """
    proxies = {'https': ip}  # proxy mapping passed to requests
    global all  # module-level result list (NOTE: shadows the builtin all())
    j = 0
    while j < 3:  # at most 3 attempts per ip
        try:
            # Request the target site through the candidate proxy.
            r = requests.get(test_url, proxies=proxies, timeout=time_out)
            # A 200 status code means the proxy works.
            if r.status_code == 200:
                print('***************測試通過%s**************' % ip)
                all.append(ip)  # record the working ip
                break
            else:
                print('請求失敗%s' % ip)
        except requests.RequestException:
            # Connection/proxy/timeout failure.  The original used a bare
            # `except:`, which would also swallow KeyboardInterrupt etc.
            print('請求過程錯誤%s' % ip)
        j += 1
        print('-----------------這是第 %d 次測試----------------' % j)
# Base URL of the xicidaili free-proxy listing ("nn" = high-anonymity section).
url = 'http://www.xicidaili.com/nn/'
# Pool of User-Agent strings -- rotating them is one way to dodge anti-scraping.
user_list = ["Mozilla/4.0 (compatible; MSIE 6.0; Windows NT 5.1; SV1; AcooBrowser; .NET CLR 1.1.4322; .NET CLR 2.0.50727)",
"Mozilla/4.0 (compatible; MSIE 7.0; Windows NT 6.0; Acoo Browser; SLCC1; .NET CLR 2.0.50727; Media Center PC 5.0; .NET CLR 3.0.04506)",
"Mozilla/4.0 (compatible; MSIE 7.0; AOL 9.5; AOLBuild 4337.35; Windows NT 5.1; .NET CLR 1.1.4322; .NET CLR 2.0.50727)",
"Mozilla/5.0 (Windows; U; MSIE 9.0; Windows NT 9.0; en-US)",
"Mozilla/5.0 (compatible; MSIE 9.0; Windows NT 6.1; Win64; x64; Trident/5.0; .NET CLR 3.5.30729; .NET CLR 3.0.30729; .NET CLR 2.0.50727; Media Center PC 6.0)",
"Mozilla/5.0 (compatible; MSIE 8.0; Windows NT 6.0; Trident/4.0; WOW64; Trident/4.0; SLCC2; .NET CLR 2.0.50727; .NET CLR 3.5.30729; .NET CLR 3.0.30729; .NET CLR 1.0.3705; .NET CLR 1.1.4322)",
"Mozilla/4.0 (compatible; MSIE 7.0b; Windows NT 5.2; .NET CLR 1.1.4322; .NET CLR 2.0.50727; InfoPath.2; .NET CLR 3.0.04506.30)",
"Mozilla/5.0 (Windows; U; Windows NT 5.1; zh-CN) AppleWebKit/523.15 (KHTML, like Gecko, Safari/419.3) Arora/0.3 (Change: 287 c9dfb30)",
"Mozilla/5.0 (X11; U; Linux; en-US) AppleWebKit/527+ (KHTML, like Gecko, Safari/419.3) Arora/0.6",
"Mozilla/5.0 (Windows; U; Windows NT 5.1; en-US; rv:1.8.1.2pre) Gecko/20070215 K-Ninja/2.1.1",
"Mozilla/5.0 (Windows NT 6.1; WOW64) AppleWebKit/536.3 (KHTML, like Gecko) Chrome/19.0.1063.0 Safari/536.3"]
# Module-level list that test_ip() fills with the IPs that pass.
# (NOTE: the name shadows the builtin all(); kept for compatibility.)
all = []
# Container for the raw ip:port strings scraped from the site.
ip_list = []
# Scrape 20 pages of listings.
for page in range(20):
    # BUG FIX: the original did `url += str(page)`, so page numbers
    # accumulated onto the URL ('.../nn/0', '.../nn/01', '.../nn/012', ...).
    # Build each page URL from the unchanged base instead.
    page_url = url + str(page)
    headers = {
        # Pick a random User-Agent for every fetch.
        'User-Agent': random.choice(user_list)
    }
    # Pause a random (float) number of seconds between fetches.
    time.sleep(random.uniform(0, 4))
    # Fetch and parse the listing page.
    res = requests.get(page_url, headers=headers).text
    soup = BeautifulSoup(res, 'lxml')
    # Table rows carrying class="odd" hold the proxy entries.
    ips = soup.find_all('', {'class': 'odd'})
    for ip_ in ips:
        # First two <td> cells are the ip and the port.
        ip = re.findall(r'<td>(.*)</td>', str(ip_))
        ip_list.append(ip[0] + ':' + ip[1])
        print(ip[0] + ':' + ip[1])
# Probe every scraped ip.
for ip in ip_list:
    test_ip(ip)
# Print the IPs that passed; they could also be persisted to a DB or a file.
print(all)