Python Selenium: Three Ways to Set Element Waits
1. sleep() forced wait
2. implicitly_wait() implicit wait
3. WebDriverWait() explicit wait
Pros and cons of the three approaches
1. sleep() forced wait
from selenium import webdriver
from time import sleep

driver = webdriver.Chrome()
sleep(2)    # force a 2-second wait
driver.get('https://www.baidu.com')
Pros:
The code is concise and easy to read.
Cons:
If the sleep time is too short, the element may not have loaded yet and the script throws an error; if it is too long, the element has long since loaded but the script keeps waiting, wasting time and dragging down overall execution speed.
My take:
Simple and crude. Pick a reasonable sleep time based on how quickly the site responds and how fast your own network is.
2. implicitly_wait() implicit wait
from selenium import webdriver
from time import sleep

driver = webdriver.Chrome()
driver.implicitly_wait(20)  # wait up to 20 seconds
driver.get('https://www.baidu.com')
Pros:
1. The code is concise.
2. Calling implicitly_wait(10) near the top of the script takes effect for the whole run (it is global: set it right after the driver is initialized and every later lookup is affected), always waiting for elements to finish loading.
3. If the element is not loaded within the configured time, a NoSuchElementException is raised. If the element appears before the timeout, the script continues immediately instead of always waiting out the full 10 seconds.
Cons:
1. It insists on the page being loaded before the code continues, which hurts execution efficiency. Usually what we want is for the code to continue as soon as the element we need to locate is available, without waiting for the whole page to finish loading.
My take:
1. Not a good fit for sites that load data via AJAX, for example pagination: a given element is always present, but its data keeps changing. Once the first page has loaded, every subsequent "page" you grab will contain the same data as the first page, because the code sees that the element is already loaded and does not wait for the AJAX refresh (see the sketch below).
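Here is a minimal sketch of that pitfall; the page URL and the CSS selectors are made up for illustration, not taken from a real site:
from selenium import webdriver

driver = webdriver.Chrome()
driver.implicitly_wait(10)                        # global implicit wait
driver.get('http://example.com/list')             # hypothetical AJAX-paginated page

# the result rows already exist in the DOM, so the implicit wait is satisfied instantly
page1_rows = driver.find_elements_by_css_selector('div.result-row')

driver.find_element_by_css_selector('a.next-page').click()

# BUG: nothing is waited for here -- 'div.result-row' is still present (holding stale data),
# so this very likely re-reads the page-1 rows before the AJAX refresh replaces them
page2_rows = driver.find_elements_by_css_selector('div.result-row')
The explicit wait described in section 3 can get around this by waiting on a condition that actually changes between pages (for example EC.staleness_of applied to an element grabbed before the click).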
3. WebDriverWait() explicit wait
from selenium import webdriver
from selenium.webdriver.support import expected_conditions as EC
from selenium.webdriver.support.ui import WebDriverWait    # mind the capitalization of WebDriverWait
from selenium.webdriver.common.by import By

driver = webdriver.Chrome()
driver.get('https://www.baidu.com')
try:
    element = WebDriverWait(driver, 10).until(EC.presence_of_element_located((By.ID, 'kw')))
    element.send_keys('123')
    driver.find_element_by_id('su').click()
except Exception as message:
    print('Element location failed: %s' % message)
finally:
    pass
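As a quick aside, presence_of_element_located is only one of the ready-made conditions shipped in expected_conditions. The sketch below shows a few other commonly used ones; it reuses the driver and imports from the example above and the same Baidu locators, so treat it as illustrative rather than something to run as-is (each call raises a TimeoutException if the condition is not met within 10 seconds):
wait = WebDriverWait(driver, 10)
# element is present in the DOM and visible (non-zero size, not display:none)
box = wait.until(EC.visibility_of_element_located((By.ID, 'kw')))
# element is visible and enabled, i.e. safe to click
wait.until(EC.element_to_be_clickable((By.ID, 'su'))).click()
# the page title contains the given substring
wait.until(EC.title_contains('123'))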
Pros:
Fast execution. There is no need to wait for the whole page to load: as soon as the element you want to locate is available, the code continues. This is the smartest way to set an element wait.
Cons:
1. You have to import
from selenium.webdriver.support import expected_conditions as EC
from selenium.webdriver.support.ui import WebDriverWait
from selenium.webdriver.common.by import By
All three imports above are required, and the import paths are long-winded and tedious.
2. The wait code itself is also somewhat involved, with a few extra steps:
element = WebDriverWait(driver, 10).until(EC.presence_of_element_located((By.ID, 'kw')))
element.send_keys('123')
My take: compared with the other two, this approach is arguably the best, but it is a hassle and takes a lot of code to write. In practice you can mix it with the first approach (sleep). I still tend to reach for sleep: I only use Selenium when a site cannot be reverse-engineered directly, or when Selenium is genuinely better than cracking the requests. Personally I avoid it when I can, because scraping with it is just too slow. A small helper that trims the boilerplate is sketched below.
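If the verbosity is the main complaint, one option is to hide the three imports and the WebDriverWait call behind a small helper written once per project. This is just a minimal sketch of that idea; the helper name wait_for is my own and not part of Selenium:
from selenium.webdriver.common.by import By
from selenium.webdriver.support import expected_conditions as EC
from selenium.webdriver.support.ui import WebDriverWait

def wait_for(driver, locator, timeout=10, condition=EC.presence_of_element_located):
    # wait up to `timeout` seconds for `condition` on `locator`, then return whatever it yields
    return WebDriverWait(driver, timeout).until(condition(locator))

# Call sites shrink back to roughly one line each:
# element = wait_for(driver, (By.ID, 'kw'))
# element.send_keys('123')
# wait_for(driver, (By.ID, 'su'), condition=EC.element_to_be_clickable).click()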
Below is the code I used to scrape one site. The author achievements on this site could not be fetched any other way, so I had to resort to this approach:
from selenium import webdriver
import time
from lxml.html import etree
import copy
import json
from selenium.webdriver.common.by import By
from selenium.webdriver.support.ui import WebDriverWait
from selenium.webdriver.support import expected_conditions as EC
def getAuthors():
    j1 = set()
    f = open('Author.json', 'r', encoding='utf-8')
    data = f.read()
    data_list = data.split('\n')
    for dt in data_list:
        j1.add(dt)
    f.close()
    print('j1= ', len(j1))
    j2 = set()
    f1 = open('yzq.json', 'r', encoding='utf-8')
    data1 = f1.read()
    data_list1 = data1.split('\n')
    for dt in data_list1:
        j2.add(dt)
    print('j2= ', len(j2))
    countSet = j1 - j2
    print('countset= ', len(countSet))
    AuthorsData = []
    for dt in countSet:
        dt_json = json.loads(dt)
        if int(dt_json["成果"]) > 0:
            AuthorsData.append(dt_json)
    # sample record:
    # dt = {'img': 'www.scholarmate/avatars/99/92/62/37572.jpg', 'name': '吴伟',
    #       'url': 'www.scholarmate/P/aeiUZr', 'org': '复旦大学, 教授', '项目': 20, '成果': 234, 'H指数': '24'}
    print('AuthorData= ', len(AuthorsData))
    return AuthorsData
def parseHtml(html, i):
    temp_list = []
    html_data = etree.HTML(html)
    project_html = html_data.xpath('//div[@class="pub-idx__main"]')
    for p in project_html:
        # pro_name = p.xpath('./div[@class="pub-idx__main_title"]/a/@title')[0]
        pro_name = p.xpath('.//a/@title')[0].strip().replace('\xa0', '')   # strip non-breaking spaces
        # pro_url = p.xpath('./div[@class="pub-idx__main_title"]/a/@href')[0]
        pro_url = p.xpath('.//a/@href')[0]
        pro_author = p.xpath('./div[2]/@title')[0].strip().replace('\xa0', '')
        # pro_author = p.xpath('.//div[@class="pub-idx__main_author"]/@title')
        pro_inst = p.xpath('./div[3]/@title')[0]
        temp_dict = {
            'num': i,
            'pro_name': pro_name,
            'pro_url': pro_url,
            'pro_author': pro_author,
            'pro_inst': pro_inst
        }
        temp_list.append(copy.deepcopy(temp_dict))
    return temp_list
def parseData(author_data):
    try:
        url = author_data['url']
        ach_num = int(author_data['成果'])
        pages = ach_num // 10
        pages_ys = ach_num % 10
        if pages_ys > 0:
            pages += 1
        driver = webdriver.Chrome()
        # driver.implicitly_wait(10)
        driver.get(url)
        psn_data = []
        for i in range(1, pages + 1):
            if i == 1:
                # if the page stops responding partway through, just drop this batch of data
                try:
                    # time.sleep(2)
                    driver.find_element_by_xpath('//*[@id="pubTab"]').click()
                    # time.sleep(3)
                    # the following alternatives are available:
                    # WebDriverWait(driver, 5).until(EC.presence_of_element_located((By.ID, 'pub-idx__main')))
                    # WebDriverWait(driver, 5).until(EC.presence_of_element_located((By.CLASS_NAME, 'pub-idx__main')))
                    # WebDriverWait(driver, 5).until(EC.presence_of_element_located((By.CSS_SELECTOR, './/pub-idx__main')))  # also not a good fit for this site, still grabs duplicates
                    WebDriverWait(driver, 5).until(EC.presence_of_element_located((By.XPATH, '//div[@class="pub-idx__main"]')))
                    html = driver.page_source
                    temp_dict = parseHtml(html, i)
                    psn_data.append(copy.deepcopy(temp_dict))
                except:
                    import traceback
                    traceback.print_exc()
                    pass
            else:
                # driver.find_element_by_xpath('//*[@id="pubTab"]').click()
                # scroll the page to the bottom
                try:
                    js = "var q=document.documentElement.scrollTop=100000"
                    # time.sleep(1)
                    driver.find_element_by_xpath('//div[@class="pagination__pages_next"]').click()
                    # time.sleep(2)
                    WebDriverWait(driver, 5).until(EC.presence_of_element_located((By.XPATH, '//div[@class="pub-idx__main"]')))
                    html = driver.page_source
                    temp_dict = parseHtml(html, i)
                    psn_data.append(copy.deepcopy(temp_dict))
                except:
                    pass
        driver.close()
        psn_data = {
            'init_data': author_data,
            'psn_data': psn_data
        }
        print(psn_data)
        psn_data_string = json.dumps(psn_data, ensure_ascii=False)
        with open('data.json', 'a+', encoding='utf-8') as f:
            f.write('{}\n'.format(psn_data_string))
        author_data_string = json.dumps(author_data, ensure_ascii=False)
        with open('yzq.json', 'a+', encoding='utf-8') as f:
            f.write('{}\n'.format(author_data_string))
    except:
        pass
        # import traceback
        # print(traceback.print_exc())
        # au_strign = json.dumps(author_data, ensure_ascii=False)
        # author_data_string = json.dumps(au_strign, ensure_ascii=False)
        # with open('error.json', 'a+', encoding='utf-8') as f:
        #     f.write('{}\n'.format(author_data_string))
def main():
    # sample values of authors (three records shown):
    # {"img": "www.scholarmate/avatars/e4/fe/1e/1000002077830.png?A=DMkT", "name": "胡婷",
    #  "url": "www.scholarmate/P/QFFbae", "org": "四川大学, 主治医师", "项目": "0", "成果": "11", "H指数": "0"}
    # {"img": "www.scholarmate/avatars/01/ea/59/1000002180047.png?A=DVUy", "name": "白晓涓",
    #  "url": "www.scholarmate/P/73me22", "org": "", "项目": "6", "成果": "8", "H指数": "0"}
    # {"img": "www.scholarmate/avatars/fe/0d/89/1000000732306.png?A=D65r", "name": "原鹏飞",
    #  "url": "www.scholarmate/P/77nIFr", "org": "国家统计局统计科学研究所, 副研究员", "项目": "0", "成果": "90", "H指数": "0"}
    AuthorsData = getAuthors()
    for authors in AuthorsData:
        print('author= ', authors)
        parseData(authors)


if __name__ == '__main__':
    main()
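As the comments in parseData note, waiting for presence of //div[@class="pub-idx__main"] does not really help on the later pages, because those divs are already present before the "next page" click takes effect, which is how duplicate pages sneak in. One alternative, sketched below and not tested against this site (it assumes the site detaches and re-renders the result divs on each page change), is to grab one of the old result divs before clicking and then wait for it to go stale:
from selenium.webdriver.support import expected_conditions as EC
from selenium.webdriver.support.ui import WebDriverWait

def goto_next_page(driver, timeout=5):
    # remember one of the current result divs before paging
    old_first_result = driver.find_element_by_xpath('//div[@class="pub-idx__main"]')
    driver.find_element_by_xpath('//div[@class="pagination__pages_next"]').click()
    # staleness_of fires once that element is detached from the DOM,
    # i.e. once the AJAX refresh has swapped in the new page of results
    WebDriverWait(driver, timeout).until(EC.staleness_of(old_first_result))
    return driver.page_source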
That wraps up this article on the three ways to set element waits in Python Selenium. For more on Selenium element waits, search the earlier articles or browse the related articles below, and thanks for your continued support!
