selenium中文文檔:
https://selenium-python-zh.readthedocs.io/en/latest/navigating.html#id2
查找元素:
image.png
from selenium.webdriver.common.by import By
driver.find_element(By.XPATH, '//button[text()="Some text"]')
driver.find_elements(By.XPATH, '//button')
下面是 By 類的一些可用屬性:
ID = "id"
XPATH = "xpath"
LINK_TEXT = "link text"
PARTIAL_LINK_TEXT = "partial link text"
NAME = "name"
TAG_NAME = "tag name"
CLASS_NAME = "class name"
CSS_SELECTOR = "css selector"
login_form = driver.find_element_by_id('loginForm')#id
username = driver.find_element_by_name('username')#name
password = driver.find_element_by_name('password')
continue_button = driver.find_element_by_name('continue')  # 注意:continue 是 Python 保留字,不能用作變量名
login_form = driver.find_element_by_xpath("/html/body/form[1]")#xpath
login_form = driver.find_element_by_xpath("//form[1]")
login_form = driver.find_element_by_xpath("//form[@id='loginForm']")
定位元素:lxml和selenium的區別
image.png
表單元素
image.png
select
image.png
按鈕的點擊事件
image.png
行為鏈
image.png
image.png
cookie操作
image.png
打開一個頁面 driver.get("http://www.example.com")
現在設置Cookies,這個cookie在域名根目錄下("/")生效 cookie = {'name': 'foo', 'value': 'bar'} driver.add_cookie(cookie)
現在獲取所有當前URL下可獲得的Cookies driver.get_cookies()
頁面等待
顯式等待是你在代碼中定義等待一定條件發(fā)生后再進一步執(zhí)行你的代碼。 最糟糕的案例是使用time.sleep(),它將條件設置為等待一個確切的時間段。 這里有一些方便的方法讓你只等待需要的時間。WebDriverWait結合ExpectedCondition 是實現(xiàn)的一種方式。
from selenium import webdriver
from selenium.webdriver.common.by import By
from selenium.webdriver.support.ui import WebDriverWait
from selenium.webdriver.support import expected_conditions as EC

driver = webdriver.Firefox()
driver.get("http://somedomain/url_that_delays_loading")
try:
    # Wait at most 10 s for the element to appear in the DOM;
    # raises TimeoutException if it never shows up.
    element = WebDriverWait(driver, 10).until(
        EC.presence_of_element_located((By.ID, "myDynamicElement"))
    )
finally:
    driver.quit()
image.png
隱式等待
如果某些元素不是立即可用的,隱式等待是告訴WebDriver等待一定時間後再去查找元素。默認等待時間是0秒;一旦設置該值,隱式等待在該WebDriver實例的整個生命周期內都生效。
from selenium import webdriver
driver = webdriver.Firefox()
driver.implicitly_wait(10) # seconds; every find_element* call now polls up to 10 s before raising NoSuchElementException
driver.get("http://somedomain/url_that_delays_loading")
myDynamicElement = driver.find_element_by_id("myDynamicElement")
打開一個新的頁面(切換頁面)
image.png
driver切換到新的頁面:driver.switch_to.window(handle)(舊寫法 driver.switch_to_window(handle) 已棄用)
image.png
image.png
設置代理ip
image.png
image.png
WebElement
image.png
拉勾網爬蟲
1便锨、方法一:分析接口,但是會被限制我碟,如果太快放案,加headers參數(shù),cookies矫俺,referer吱殉。。厘托。
image.png
image.png
image.png
image.png
2友雳、方法二:selenium
image.png
image.png
image.png
image.png
image.png
image.png
image.png
image.png
image.png
image.png
image.png
這里不能有text
代碼:
from selenium import webdriver
import requests
from lxml import etree
import time
import csv
class Lagou(object):
    """Scrape Java job listings (Hangzhou) from lagou.com with Selenium.

    Walks the paginated result list, opens each posting in a new tab,
    extracts name/salary/company, and finally writes everything to job.csv.
    """

    # Path to the chromedriver binary used to launch Chrome.
    driver_path = r"C:/Users/ASUS/AppData/Local/Programs/Python/Python36/Scripts/chromedriver.exe"

    def __init__(self, *args, **kwargs):
        self.driver = webdriver.Chrome(executable_path=Lagou.driver_path)
        self.url = 'https://www.lagou.com/jobs/list_java?city=%E6%9D%AD%E5%B7%9E&cl=false&fromSearch=true&labelWords=&suginput='
        self.positions = []  # accumulated job dicts
        self.count = 0       # number of "next page" clicks performed

    def run(self):
        """Main loop: parse the current list page, then page forward."""
        self.driver.get(self.url)
        while True:
            source = self.driver.page_source
            try:
                self.parse_list_page(source)
                # The last <span> in the pager is the "next page" button.
                # NOTE: the original text had '//' mangled into 'http://'.
                next_btn = self.driver.find_element_by_xpath(
                    "//div[@class='pager_container']//span[last()]")
                # Stop after 3 pages, or when the button is disabled
                # (i.e. we reached the last result page).
                if self.count == 2 or ("pager_next_disabled" in next_btn.get_attribute("class")):
                    self.export_csv()
                    break
                else:
                    next_btn.click()
                    self.count += 1
            except Exception:
                # On any scraping error, save what we already have and dump
                # the page source for debugging (was a bare `except:` which
                # also swallowed KeyboardInterrupt/SystemExit).
                self.export_csv()
                print(source)
            time.sleep(1)  # be gentle with the site between pages

    def export_csv(self):
        """Write all collected positions to job.csv."""
        # A list (not a set) keeps the column order deterministic.
        headers = ['name', 'salary', 'company']
        with open('job.csv', 'w', encoding='utf-8', newline='') as fp:
            writer = csv.DictWriter(fp, headers)
            writer.writeheader()
            writer.writerows(self.positions)

    def parse_list_page(self, source):
        """Extract every job-detail URL from a list page and visit it."""
        html = etree.HTML(source)
        urls = html.xpath("//div[@class='position']//a[@class='position_link']/@href")
        for url in urls:
            self.parse_page(url)

    def parse_page(self, url):
        """Open *url* in a new tab, scrape it, close the tab, switch back."""
        self.driver.execute_script("window.open('%s')" % url)
        # switch_to_window() is deprecated; switch_to.window() is current API.
        self.driver.switch_to.window(self.driver.window_handles[1])
        source = self.driver.page_source
        self.parse_detail_page(source)
        self.driver.close()
        self.driver.switch_to.window(self.driver.window_handles[0])

    def parse_detail_page(self, source):
        """Pull name/salary/company out of a job-detail page."""
        html = etree.HTML(source)
        job_name = html.xpath("//div[@class='job-name']//span[@class='name']/text()")[0].strip()
        salary = html.xpath("//dd[@class='job_request']//span[@class='salary']/text()")[0].strip()
        company = html.xpath("//div[@class='job_company_content']//em[@class='fl-cn']/text()")[0].strip()
        self.positions.append({
            'name': job_name,
            'salary': salary,
            'company': company,
        })
# Script entry point: build the scraper and start crawling.
if __name__ == "__main__":
    spider = Lagou()
    spider.run()
image.png
遇到的問題:
爬到第二頁,需要登錄,就被迫停止了。
解決了:加了一個微信掃碼登錄,等待10s,然後發現跳轉到的頁面是首頁,所以得搜索,添加輸入和點擊
from selenium import webdriver
import requests
from lxml import etree
import time
import csv
class Lagou(object):
    """Scrape Java job listings from lagou.com with Selenium (v2).

    Compared to v1: starts at the home page, performs a WeChat QR-code
    login (manual scan), searches for "java", then walks the paginated
    result list and writes name/salary/company to job.csv.
    """

    # Path to the chromedriver binary used to launch Chrome.
    driver_path = r"C:/Users/ASUS/AppData/Local/Programs/Python/Python36/Scripts/chromedriver.exe"

    def __init__(self, *args, **kwargs):
        self.driver = webdriver.Chrome(executable_path=Lagou.driver_path)
        self.url = 'https://www.lagou.com/'
        self.positions = []  # accumulated job dicts
        self.count = 0       # number of "next page" clicks performed

    def run(self):
        """Entry point: login, search, then loop over result pages."""
        self.driver.get(self.url)
        self.login()
        self.search()
        while True:
            source = self.driver.page_source
            try:
                self.parse_list_page(source)
                # Last <span> of the pager is the "next page" button.
                # NOTE: the original text had '//' mangled into 'http://'.
                next_btn = self.driver.find_element_by_xpath(
                    "//div[@class='pager_container']//span[last()]")
                # Stop after 3 pages or on the last (disabled) page.
                if self.count == 2 or ("pager_next_disabled" in next_btn.get_attribute("class")):
                    self.export_csv()
                    break
                else:
                    next_btn.click()
                    self.count += 1
            except Exception:
                # Save what we have and dump the page source for debugging
                # (was a bare `except:` which hid real problems).
                self.export_csv()
                print(source)
            time.sleep(1)  # throttle between pages

    def login(self):
        """Open the login dialog and wait for a manual WeChat QR scan."""
        login_btn = self.driver.find_element_by_xpath(
            "//div[@class='passport']//a[@class='login']")
        login_btn.click()
        weixin_login_btn = self.driver.find_element_by_xpath(
            "//div[@class='third-login-btns']//a[@class='wechat']")
        weixin_login_btn.click()
        time.sleep(10)  # give the user time to scan the QR code

    def search(self):
        """After login we land on the home page; search for 'java'."""
        input_tag = self.driver.find_element_by_id("search_input")
        input_tag.send_keys("java")
        btn = self.driver.find_element_by_id("search_button")
        btn.click()

    def export_csv(self):
        """Write all collected positions to job.csv."""
        # A list (not a set) keeps the column order deterministic.
        headers = ['name', 'salary', 'company']
        with open('job.csv', 'w', encoding='utf-8', newline='') as fp:
            writer = csv.DictWriter(fp, headers)
            writer.writeheader()
            writer.writerows(self.positions)

    def parse_list_page(self, source):
        """Extract every job-detail URL from a list page and visit it."""
        html = etree.HTML(source)
        urls = html.xpath("//div[@class='position']//a[@class='position_link']/@href")
        for url in urls:
            self.parse_page(url)

    def parse_page(self, url):
        """Open *url* in a new tab, scrape it, close the tab, switch back."""
        self.driver.execute_script("window.open('%s')" % url)
        # switch_to_window() is deprecated; switch_to.window() is current API.
        self.driver.switch_to.window(self.driver.window_handles[1])
        source = self.driver.page_source
        self.parse_detail_page(source)
        self.driver.close()
        self.driver.switch_to.window(self.driver.window_handles[0])

    def parse_detail_page(self, source):
        """Pull name/salary/company out of a job-detail page."""
        html = etree.HTML(source)
        job_name = html.xpath("//div[@class='job-name']//span[@class='name']/text()")[0].strip()
        salary = html.xpath("//dd[@class='job_request']//span[@class='salary']/text()")[0].strip()
        company = html.xpath("//div[@class='job_company_content']//em[@class='fl-cn']/text()")[0].strip()
        self.positions.append({
            'name': job_name,
            'salary': salary,
            'company': company,
        })
# Script entry point: build the scraper and start crawling.
if __name__ == "__main__":
    spider = Lagou()
    spider.run()
一直報
ERROR:platform_sensor_reader_win.cc(244)] NOT IMPLEMENTED
這個錯
查到有這個解決辦法:
禁止chromedriver打印日志console信息
添加參數(shù) log-level
# Silence chromedriver console noise by raising Chrome's log threshold.
options = webdriver.ChromeOptions()
options.add_argument('--log-level=3')
# NOTE(review): chrome_options= is deprecated in newer Selenium releases;
# options= is the current keyword — confirm against the installed version.
browser = webdriver.Chrome(chrome_options=options)
browser.get(url)
## INFO = 0,
## WARNING = 1,
## LOG_ERROR = 2,
## LOG_FATAL = 3
## default is 0