1. Overview
The crawler built on top of the NodeJS proxy pool from my earlier posts has finally made progress: a first working implementation is in place. The references I leaned on are listed below:
Async handling
https://blog.csdn.net/tinyzhao/article/details/52684473
// didn't understand this one; couldn't get async working from it
https://github.com/Blackyukun/IPProxyPool
// has some sample code
https://segmentfault.com/a/1190000008814676?utm_source=tag-newest
// this one did the trick
https://www.cnblogs.com/shenh/p/9090586.html
// limiting the maximum concurrency
https://docs.aiohttp.org/en/stable/client_advanced.html
// when in doubt, read the official docs
2. Code and Comments
'''
@Description: Crawler test page
@Author: BerryBC
@Date: 2020-01-30 15:00:34
@LastEditors : BerryBC
@LastEditTime : 2020-01-31 13:53:19
'''
# Timeouts
# https://www.cnblogs.com/gl1573/p/10129382.html
# Crawling basics
# http://c.biancheng.net/view/2011.html
# Connecting to MongoDB
# https://blog.51cto.com/1767340368/2092960
# Async handling
# https://blog.csdn.net/tinyzhao/article/details/52684473 // didn't understand this one; couldn't get async working from it
# https://github.com/Blackyukun/IPProxyPool // has some sample code
# https://segmentfault.com/a/1190000008814676?utm_source=tag-newest // this one did the trick
# https://www.cnblogs.com/shenh/p/9090586.html // limiting the maximum concurrency
# https://docs.aiohttp.org/en/stable/client_advanced.html // when in doubt, read the official docs
# Imports
import requests
import time
import pymongo
from bs4 import BeautifulSoup
from target import arrTarget
import random
import asyncio
import aiohttp
# MongoDB connection and handles for the relevant collection
myclient = pymongo.MongoClient("mongodb://localhost:27017/")
mydb = myclient["dbProxy"]
mydb.authenticate('Berry', 'Berry')
mycol = mydb["tbProxy"]
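# Note (added): Database.authenticate() is the legacy PyMongo 2/3 API and was
# removed in PyMongo 4. On current drivers the equivalent is to hand the
# credentials to MongoClient directly, e.g. (a sketch, same user/db as above):
# myclient = pymongo.MongoClient("mongodb://localhost:27017/",
#                                username="Berry", password="Berry",
#                                authSource="dbProxy")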
# Load proxies from MongoDB: only those with a fail count <= 8, fetching just
# the u (host) and p (port) fields, sorted by fail in descending order
arrProxy = []
for x in mycol.find({"fail": {"$lte": 8}}, {"_id": 0, "u": 1, "p": 1}).sort("fail", -1):
    arrProxy.append(x)
# Request details passed to aiohttp
url = 'http://***.******.***/'
headers = {
    'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/70.0.3538.110 Safari/537.36'}
intProxyLen = len(arrProxy)
# The worker coroutine that does the actual crawling
async def funSpyWeb(eleWeb, inSemaphore):
    # Limit concurrency with the shared semaphore
    async with inSemaphore:
        bolRetry = True
        intTryTime = 0
        url = eleWeb
        try:
            async with aiohttp.ClientSession() as session:
                # Retry until a proxy gets through or every proxy has been tried
                while (bolRetry and (intTryTime < intProxyLen)):
                    try:
                        # Build the proxy URL for this attempt
                        real_proxy = "http://" + \
                            arrProxy[intTryTime]["u"] + \
                            ":" + arrProxy[intTryTime]["p"]
                        # Fetch the page asynchronously through the proxy
                        async with session.get(url, proxy=real_proxy, timeout=5, headers=headers) as response:
                            # This is only a test, so the status code is ignored
                            # for now; not timing out is good enough
                            # if response.status == 200:
                            #     self._conn.put(proxy)
                            #     print('Valid proxy', proxy)
                            strhtml = await response.text()
                            # Collect every link for later processing
                            soup = BeautifulSoup(strhtml, 'lxml')
                            data = soup.select('a')
                            # for item in data:
                            #     result = {
                            #         'title': item.get_text(),
                            #         'link': item.get('href')
                            #     }
                            #     print(result)
                            # Reaching here without a timeout means we are done
                            bolRetry = False
                            print(" After " + str(intTryTime) +
                                  " time, success reach " + url)
                            # print(data[0].get_text())
                    except Exception:
                        # This proxy failed; move on to the next one
                        intTryTime += 1
                        # print(' Fail ' + str(intTryTime) + ' time')
        except Exception as e:
            print(e)
# Driver code: just kick everything off and log the start/end times
print('Begin : ' + time.strftime('%Y-%m-%d %H:%M:%S'))
loop = asyncio.get_event_loop()
# Cap the number of concurrent coroutines (not threads)
semaphore = asyncio.Semaphore(20)
# Queue up one task per target URL
waittask = asyncio.gather(
    *([funSpyWeb(strWebSite, semaphore) for strWebSite in arrTarget]))
loop.run_until_complete(waittask)
loop.close()
print('Program End : ' + time.strftime('%Y-%m-%d %H:%M:%S'))
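# Note (added): on Python 3.7+ the explicit event-loop management above can be
# replaced by asyncio.run(), which creates and closes the loop itself.
# A minimal sketch, reusing the same funSpyWeb/arrTarget as above:
# async def main():
#     semaphore = asyncio.Semaphore(20)
#     await asyncio.gather(*(funSpyWeb(strWebSite, semaphore)
#                            for strWebSite in arrTarget))
# asyncio.run(main())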
# --------- Failed attempt below (I didn't understand this at the time) -------------
# (In hindsight: the async iterator class itself is valid, but `async for`
# awaits each item in turn, so this version runs sequentially, not concurrently.)
# class classSpy():
#     def __init__(self, arrInProxy):
#         self.arrProxy = iter(arrInProxy)
#     def __aiter__(self):
#         return self
#     async def __anext__(self):
#         try:
#             eleProxy = next(self.arrProxy)
#         except StopIteration:
#             raise StopAsyncIteration
#         return eleProxy
# arrTmp = []
# arrTmp.append(1)
# arrTmp.append(2)
# arrTmp.append(3)
# arrTmp.append(4)
# arrTmp.append(5)
# arrTmp.append(6)
# async def run():
#     print('Begin : ' + time.strftime('%Y-%m-%d %H:%M:%S'))
#     async for eleBe in classSpy(arrTmp):
#         await asyncio.sleep(random.randint(1, 3))
#         print(' Now : ' + str(eleBe) + ' , time: ' +
#               time.strftime('%Y-%m-%d %H:%M:%S'))
#     print('End : ' + time.strftime('%Y-%m-%d %H:%M:%S'))
# loop = asyncio.get_event_loop()
# loop.run_until_complete(run())
# ------------------------------------------
# --------- Successful async experiment below -------------
# arrTmp = []
# arrTmp.append(1)
# arrTmp.append(2)
# arrTmp.append(3)
# arrTmp.append(4)
# arrTmp.append(5)
# arrTmp.append(6)
# async def run(eleBe, inSemaphore):
#     async with inSemaphore:
#         await asyncio.sleep(random.randint(1, 3))
#         print(' Now : ' + str(eleBe) + ' , time: ' +
#               time.strftime('%Y-%m-%d %H:%M:%S'))
# def funDone(waittask):
#     print('Callback End : ' + time.strftime('%Y-%m-%d %H:%M:%S'))
# print('Begin : ' + time.strftime('%Y-%m-%d %H:%M:%S'))
# # ------------- Invocation style 1 ----------------
# async def main():
#     semaphore = asyncio.Semaphore(2)
#     waittask = asyncio.gather(*([run(proxy, semaphore) for proxy in arrTmp]))
#     waittask.add_done_callback(funDone)
#     await asyncio.gather(waittask)
# asyncio.run(main())
# # -------------------------------------
# # ------------- Invocation style 2 ----------------
# loop = asyncio.get_event_loop()
# semaphore = asyncio.Semaphore(2)
# waittask = asyncio.gather(*([run(proxy, semaphore) for proxy in arrTmp]))
# waittask.add_done_callback(funDone)
# loop.run_until_complete(waittask)
# loop.close()
# # -------------------------------------
# print('Program End : ' + time.strftime('%Y-%m-%d %H:%M:%S'))
# ------------------------------------------
3. Output
...
After 2 time, success reach http://nx.cntour.cn/
After 48 time, success reach http://www.cntour.cn/usercenter/default.aspx
After 48 time, success reach http://bj.cntour.cn/
After 48 time, success reach http://nmg.cntour.cn/
After 48 time, success reach http://cntour.cn
After 97 time, success reach http://sd.cntour.cn/
After 97 time, success reach http://sh.cntour.cn/
After 58 time, success reach http://sc.cntour.cn/
After 58 time, success reach http://us.cntour.cn/
After 21 time, success reach http://jp.cntour.cn/
After 21 time, success reach http://www.cntour.cn/domestic/237/
After 87 time, success reach http://mc.cntour.cn/
...
4. Follow-up Thoughts
The main next step is of course to store the crawled URLs in the database, stamp each one with a crawl time, and clean them out periodically; a minimal sketch of one way to do that follows.
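For the URL store, MongoDB can handle the periodic cleanup itself via a TTL index, so no separate cleanup job is needed. A minimal sketch, assuming a hypothetical tbUrl collection in the same dbProxy database (the collection name and the 7-day window are my own choices, not from the original code):
import datetime
import pymongo

myclient = pymongo.MongoClient("mongodb://localhost:27017/")
urlcol = myclient["dbProxy"]["tbUrl"]  # hypothetical collection for crawled URLs
# TTL index: MongoDB deletes a document once crawledAt is older than 7 days;
# crawledAt must be a real datetime for the TTL machinery to see it
urlcol.create_index("crawledAt", expireAfterSeconds=7 * 24 * 3600)
# Upsert keyed on the URL, so re-crawling just refreshes the timestamp
urlcol.update_one(
    {"url": "http://example.com/"},
    {"$set": {"crawledAt": datetime.datetime.utcnow()}},
    upsert=True,
)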
Each run only crawls a small batch, so portal sites or social networking sites would be worth trying next.
After that, when crawling related sites, I will need a way to tell already-crawled information apart from not-yet-crawled information; that deserves focused attention, possibly with a per-site approach. One common option is sketched below.
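A simple way to separate crawled from uncrawled links is a unique index on the URL plus insert-if-absent, so every link passes through the database exactly once. A minimal sketch, reusing the hypothetical urlcol handle from the sketch above (field names are illustrative):
from pymongo.errors import DuplicateKeyError

urlcol.create_index("url", unique=True)

def funEnqueue(strUrl):
    # New URLs enter as crawled=False; a DuplicateKeyError means the URL is
    # already known (pending or done), so it is silently skipped
    try:
        urlcol.insert_one({"url": strUrl, "crawled": False})
    except DuplicateKeyError:
        pass

# The next run's work list is then just the documents still marked pending
arrPending = [x["url"] for x in urlcol.find({"crawled": False}, {"_id": 0, "url": 1})]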