"""
Scrape genome statistics for every species in an NCBI genome search result.

Paging through NCBI genome search results is driven by a POST request with
many parameters, which is awkward to reproduce with the requests module
directly, so selenium is used to simulate the page-flip clicks and capture
each page's HTML source. Because there are many results, some form of
concurrency (multithreading / async / multiprocessing) is worthwhile; this
script uses a thread pool with 8 workers. Against the mammal genome search
results (22 pages in total) it runs reasonably fast. The purpose of each
step / parameter / function is annotated below.
"""
from lxml import etree
from selenium import webdriver
from multiprocessing.dummy import Pool
from functools import partial
import os
import requests
# 實現(xiàn)無可視化界面
from selenium.webdriver.chrome.options import Options
# 實現(xiàn)規(guī)避檢測
from selenium.webdriver import ChromeOptions
def setoption():
    """
    Build the Chrome option objects used to run the scraper headless and
    to reduce the chance of the site detecting selenium automation.

    Returns:
        tuple: ``(chrome_options, option)``
            ``chrome_options`` -- Options carrying headless mode and the
            combined ``excludeSwitches`` list. All settings live on this
            object because selenium 3's ``webdriver.Chrome`` gives the
            ``chrome_options`` keyword precedence over ``options`` when
            both are supplied (as ``mainprocess`` does), so anything set
            only on the second object would be silently discarded.
            ``option`` -- ChromeOptions with just the anti-automation
            switch, kept so the two-element return interface (and any
            caller passing it separately) keeps working.
    """
    # Headless operation: no visible browser window, no GPU use.
    chrome_options = Options()
    chrome_options.add_argument("--headless")
    chrome_options.add_argument("--disable-gpu")
    # BUG FIX: add_experimental_option replaces any earlier value for the
    # same key, and the ``options`` argument loses to ``chrome_options`` in
    # webdriver.Chrome -- so both switches must be merged into ONE list on
    # the object that actually takes effect.
    chrome_options.add_experimental_option(
        "excludeSwitches", ["enable-logging", "enable-automation"])
    # Anti-detection object retained for backward compatibility.
    option = ChromeOptions()
    option.add_experimental_option("excludeSwitches",
                                   ["enable-automation"])
    return chrome_options, option
def getonepage(pagetext, filepath):
    """
    Parse one search-result page, fetch each species' detail page, and
    append the species name plus its genome summary table to *filepath*.

    Args:
        pagetext (str): HTML source of one search-result page.
        filepath (str): output text file, opened in append mode.

    Raises:
        requests.HTTPError: if a detail page returns an error status.
    """
    tree = etree.HTML(pagetext)
    initurl = "https://www.ncbi.nlm.nih.gov"
    # One <div class="rprt"> per species hit on the result page.
    div_list = tree.xpath(
        '//*[@id="maincontent"]/div/div[5]/div[@class="rprt"]')
    headers = {
        "User-Agent": "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 \
             (KHTML, like Gecko) Chrome/83.0.4103.97 Safari/537.36"
    }
    # Open the output file once per page instead of once per species.
    with open(filepath, "a", encoding="utf-8") as fp:
        for div in div_list:
            detail_url = initurl + div.xpath('.//p/a/@href')[0]
            # Timeout so a stalled request cannot hang this worker thread
            # forever; fail loudly on HTTP errors instead of parsing an
            # error page.
            response = requests.get(detail_url, headers=headers, timeout=30)
            response.raise_for_status()
            # BUG FIX: the original decoded the body to str and immediately
            # re-encoded it to bytes -- lxml parses the raw bytes directly.
            detail_tree = etree.HTML(response.content)
            # Species name: first <span> of the first info table.
            name = detail_tree.xpath(
                '//*[@id="maincontent"]/div/div[5]/div/div[2]/table[1]//tr//span[1]/text()')[0]
            # Genome summary: all text of the statistics table, joined.
            summary = "".join(detail_tree.xpath(
                '//*[@id="mmtest1"]/div/div/div/table//tr//text()'))
            print(name, summary, sep="\n")
            fp.write(name + "\n" + summary + "\n")
def mainprocess(chrome_options, option, executable_path, filepath, thread=4):
    """
    Drive a headless Chrome through every page of the NCBI mammal genome
    search results, collect each page's HTML source, then hand the pages
    to a thread pool that extracts and stores the per-species data.

    Args:
        chrome_options: Options object with headless/anti-detection flags.
        option: ChromeOptions object passed through to webdriver.Chrome.
        executable_path (str): path to the chromedriver binary.
        filepath (str): output file; a pre-existing file is removed first.
        thread (int): number of worker threads in the pool (default 4).
    """
    bro = webdriver.Chrome(executable_path=executable_path,
                           chrome_options=chrome_options,
                           options=option)
    # Parameter list for the thread pool: one HTML source per result page.
    pagetext_list = []
    try:
        # txid40674 = Mammalia; the page shows paginated search results.
        bro.get("https://www.ncbi.nlm.nih.gov/genome/?term=txid40674%5BOrganism%3Aexp%5D")
        # First page is already loaded.
        pagetext = bro.page_source
        print("Append page1 to the queue.")
        pagetext_list.append(pagetext)
        # Total page count comes from the pager widget's "last" attribute.
        allpagetree = etree.HTML(pagetext)
        allpage = int(allpagetree.xpath('//*[@id="pageno2"]/@last')[0])
        # Click "next" repeatedly, saving each page's source.
        for pagenum in range(2, allpage + 1):
            next_btn = bro.find_element_by_xpath(
                "/html/body/div[1]/div[1]/form/div[1]/div[4]/div/div[7]/div/a[1]")
            next_btn.click()
            # NOTE(review): page_source is read right after the click with
            # no explicit wait; if pages ever come back truncated, add a
            # WebDriverWait on the pager here.
            pagetext = bro.page_source
            print(f"Append page{pagenum} to the queue.")
            pagetext_list.append(pagetext)
    finally:
        # BUG FIX: quit even when scraping raises, so no orphaned
        # Chrome/chromedriver processes are left behind. The browser is
        # also no longer needed once all page sources are collected.
        bro.quit()
    # Start fresh: remove output left over from a previous run.
    if os.path.isfile(filepath):
        os.remove(filepath)
    # Fan the collected pages out to the worker threads.
    pool = Pool(thread)
    try:
        pool.map(partial(getonepage, filepath=filepath), pagetext_list)
    finally:
        # BUG FIX: always release the pool, even if a worker raises.
        pool.close()
        pool.join()
if __name__ == "__main__":
chrome_options, option = setoption()
mainprocess(chrome_options, option,
r"chromedriver.exe", "genomeinfo.txt", 8)