Anti-crawler mechanisms
Tags (space-separated): python scrapy
Scrapy architecture
User-Agent rotation
fake-useragent
User-Agent rotation is handled with the open-source fake-useragent project:
```python
from fake_useragent import UserAgent


class RandomUserAgentMiddleware(object):
    # Rotate the User-Agent header on every outgoing request
    def __init__(self, crawler):
        super(RandomUserAgentMiddleware, self).__init__()
        self.ua = UserAgent()
        self.ua_type = crawler.settings.get('RANDOM_UA_TYPE', 'random')

    @classmethod
    def from_crawler(cls, crawler):
        return cls(crawler)

    def process_request(self, request, spider):
        def get_ua_type():
            # Pick a UA string of the configured type, e.g. ua.random, ua.chrome
            return getattr(self.ua, self.ua_type)

        request.headers.setdefault('User-Agent', get_ua_type())
```
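To enable it, register the middleware in `settings.py` and disable Scrapy's built-in `UserAgentMiddleware` so it cannot overwrite the header. A minimal sketch, assuming the class lives in `myproject/middlewares.py` (`myproject` is a placeholder):

```python
# settings.py -- 'myproject' is a placeholder for your project's package name
DOWNLOADER_MIDDLEWARES = {
    'myproject.middlewares.RandomUserAgentMiddleware': 543,
    # disable the built-in User-Agent middleware so ours takes effect
    'scrapy.downloadermiddlewares.useragent.UserAgentMiddleware': None,
}

# which fake-useragent attribute to use: 'random', 'chrome', 'firefox', ...
RANDOM_UA_TYPE = 'random'
```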
IP proxies
A downloader middleware switches the outgoing IP by setting `request.meta['proxy']` on each request (a sketch follows the proxy-pool script below). The script crawls Xici's free proxy list into MySQL and hands out a verified proxy at random:
```python
# -*- coding: utf-8 -*-
__author__ = 'bobby'

import requests
from scrapy.selector import Selector
import MySQLdb

conn = MySQLdb.connect(host="127.0.0.1", user="root", passwd="root",
                       db="article_spider", charset="utf8")
cursor = conn.cursor()


def crawl_ips():
    # Crawl the free proxy list from xicidaili.com
    headers = {"User-Agent": "Mozilla/5.0 (Windows NT 6.1; WOW64; rv:52.0) Gecko/20100101 Firefox/52.0"}
    for i in range(1568):
        response = requests.get("http://www.xicidaili.com/nn/{0}".format(i), headers=headers)

        selector = Selector(text=response.text)
        all_trs = selector.css("#ip_list tr")
        ip_list = []
        for tr in all_trs[1:]:  # skip the table header row
            speed_str = tr.css(".bar::attr(title)").extract()[0]
            if speed_str:
                speed = float(speed_str.split("秒")[0])  # title looks like "0.123秒" (seconds)
            all_texts = tr.css("td::text").extract()
            ip = all_texts[0]
            port = all_texts[1]
            proxy_type = all_texts[5]
            ip_list.append((ip, port, proxy_type, speed))

        for ip_info in ip_list:
            cursor.execute(
                "INSERT INTO proxy_ip(ip, port, speed, proxy_type) VALUES('{0}', '{1}', {2}, 'HTTP')".format(
                    ip_info[0], ip_info[1], ip_info[3]
                )
            )
            conn.commit()


class GetIP(object):
    def delete_ip(self, ip):
        # Delete an invalid ip from the database
        delete_sql = """
            delete from proxy_ip where ip='{0}'
        """.format(ip)
        cursor.execute(delete_sql)
        conn.commit()
        return True

    def judge_ip(self, ip, port):
        # Check whether the proxy works by fetching Baidu through it
        http_url = "http://www.baidu.com"
        proxy_url = "http://{0}:{1}".format(ip, port)
        try:
            proxy_dict = {
                "http": proxy_url,
            }
            response = requests.get(http_url, proxies=proxy_dict)
        except Exception as e:
            print("invalid ip and port")
            self.delete_ip(ip)
            return False
        else:
            code = response.status_code
            if 200 <= code < 300:
                print("effective ip")
                return True
            else:
                print("invalid ip and port")
                self.delete_ip(ip)
                return False

    def get_random_ip(self):
        # Pick a random ip from the database and verify it before returning
        random_sql = """
            SELECT ip, port FROM proxy_ip
            ORDER BY RAND()
            LIMIT 1
        """
        result = cursor.execute(random_sql)
        for ip_info in cursor.fetchall():
            ip = ip_info[0]
            port = ip_info[1]
            judge_re = self.judge_ip(ip, port)
            if judge_re:
                return "http://{0}:{1}".format(ip, port)
            else:
                return self.get_random_ip()


# print(crawl_ips())
if __name__ == "__main__":
    get_ip = GetIP()
    get_ip.get_random_ip()
```
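With `GetIP` in place, a downloader middleware can attach a verified proxy to every request. A minimal sketch, assuming the script above is importable as `tools.crawl_xici_ip` (that module path is an assumption):

```python
# middlewares.py -- sketch; the module path below is an assumption
from tools.crawl_xici_ip import GetIP


class RandomProxyMiddleware(object):
    # Route each outgoing request through a random verified proxy
    def process_request(self, request, spider):
        get_ip = GetIP()
        request.meta['proxy'] = get_ip.get_random_ip()
```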
Options for sourcing proxies:
- Crawl Xici's free proxy list, as in the script above
- Use the official scrapy-crawlera (see the settings sketch after this list)
- Use Tor + VPN
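scrapy-crawlera is enabled purely through configuration. A sketch based on its documented settings (the API key is a placeholder):

```python
# settings.py -- scrapy-crawlera sketch; the API key is a placeholder
DOWNLOADER_MIDDLEWARES = {
    'scrapy_crawlera.CrawleraMiddleware': 610,
}
CRAWLERA_ENABLED = True
CRAWLERA_APIKEY = '<your-crawlera-api-key>'
```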
云打碼 (YunDaMa)
- Online CAPTCHA solving: upload the captcha image to the service's HTTP API and get the recognized text back, as in the sample client below:
```python
import json
import requests


class YDMHttp(object):
    apiurl = 'http://api.yundama.com/api.php'
    username = ''
    password = ''
    appid = ''
    appkey = ''

    def __init__(self, username, password, appid, appkey):
        self.username = username
        self.password = password
        self.appid = str(appid)
        self.appkey = appkey

    def balance(self):
        # Query the remaining credit balance
        data = {'method': 'balance', 'username': self.username, 'password': self.password,
                'appid': self.appid, 'appkey': self.appkey}
        response_data = requests.post(self.apiurl, data=data)
        ret_data = json.loads(response_data.text)
        if ret_data["ret"] == 0:
            print("remaining credits:", ret_data["balance"])
            return ret_data["balance"]
        else:
            return None

    def login(self):
        # Log in and return the user id
        data = {'method': 'login', 'username': self.username, 'password': self.password,
                'appid': self.appid, 'appkey': self.appkey}
        response_data = requests.post(self.apiurl, data=data)
        ret_data = json.loads(response_data.text)
        if ret_data["ret"] == 0:
            print("login successful, uid:", ret_data["uid"])
            return ret_data["uid"]
        else:
            return None

    def decode(self, filename, codetype, timeout):
        # Upload a captcha image and return the recognized text
        data = {'method': 'upload', 'username': self.username, 'password': self.password,
                'appid': self.appid, 'appkey': self.appkey,
                'codetype': str(codetype), 'timeout': str(timeout)}
        with open(filename, 'rb') as f:
            files = {'file': f}
            response_data = requests.post(self.apiurl, files=files, data=data)
        ret_data = json.loads(response_data.text)
        if ret_data["ret"] == 0:
            print("recognition successful:", ret_data["text"])
            return ret_data["text"]
        else:
            return None


if __name__ == "__main__":
    # username
    username = 'da_ge_da1'
    # password
    password = 'da_ge_da'
    # software ID, required for the developer revenue share; get it from [My Software] in the developer console
    appid = 3129
    # software key, required for the developer revenue share; get it from [My Software] in the developer console
    appkey = '40d5ad41c047179fc797631e3b9c3025'
    # captcha image file
    filename = 'getimage.jpg'
    # captcha type, e.g. 1004 = 4 alphanumeric characters; pricing differs by type, so fill it in
    # accurately or recognition suffers; all types: http://www.yundama.com/price.html
    codetype = 1004
    # timeout in seconds
    timeout = 60
    # sanity check
    if username == 'username':
        print('set the parameters above before testing')
    else:
        # initialize the client
        yundama = YDMHttp(username, password, appid, appkey)
        # log in to YunDaMa
        uid = yundama.login()
        print('uid: %s' % uid)
        # query the balance
        balance = yundama.balance()
        print('balance: %s' % balance)
        # recognize: image path, captcha type ID, timeout (seconds) -> recognized text
        text = yundama.decode(filename, codetype, timeout)
```
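In a crawl this is typically used at login time: download the captcha image, pass it to `decode()`, and submit the recognized text with the login form. A minimal sketch (the image URL and local filename are hypothetical):

```python
import requests


def solve_captcha(image_url, yundama):
    # Download the captcha image, then hand it to YunDaMa for recognition
    resp = requests.get(image_url)
    with open('captcha.jpg', 'wb') as f:
        f.write(resp.content)
    return yundama.decode('captcha.jpg', 1004, 60)  # 1004 = 4 alphanumeric chars, 60 s timeout
```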
- Original video: the IMOOC (慕課網(wǎng)) course 《聚焦Python分布式爬蟲必學(xué)框架Scrapy 打造搜索引擎》
- This post was written by XiaoJinZi (personal homepage); please credit the source when reposting.
- As a student my abilities are limited; corrections for errors and omissions are welcome at 986209501@qq.com.