Scrapy: Scraping the Douban Login QR Code

I have been learning Python Scrapy recently, using the Douban website as my target. This post mainly records how to scrape the Douban login QR code (capturing it and saving it to disk).
While scraping, you need to allow time for the page to finish loading; otherwise the page elements cannot be located. That one issue cost me an entire morning...
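A fixed `time.sleep()` works, but Selenium's explicit waits are usually a more reliable way to deal with slow page loads. The sketch below is one possible alternative (the locator is only an assumption, since Douban's page structure can change); it waits until the login iframe is actually present before switching into it. Note also that the `find_element_by_*` helpers used in the script further down belong to Selenium 3; Selenium 4 replaces them with `find_element(By..., ...)`.

from selenium import webdriver
from selenium.webdriver.common.by import By
from selenium.webdriver.support.ui import WebDriverWait
from selenium.webdriver.support import expected_conditions as EC

driver = webdriver.Chrome()
driver.get("https://www.douban.com/")
# wait up to 15 seconds for the login iframe to appear instead of sleeping blindly
wait = WebDriverWait(driver, 15)
login_frame = wait.until(EC.presence_of_element_located((By.TAG_NAME, 'iframe')))
driver.switch_to.frame(login_frame)

The original script below keeps the simple `time.sleep()` approach from the post.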

# -*- coding:utf-8 -*-
from selenium import webdriver
from urllib.request import urlretrieve
import time


# Scan the QR code to log in and obtain the original cookies at the same time
def get_cookie_QRCode(url):
    driver = webdriver.Chrome()
    driver.get(url)
    time.sleep(10)
    # the Douban login form is embedded in an iframe, so switch into it first
    driver.switch_to.frame(driver.find_elements_by_tag_name('iframe')[0])
    # button = driver.find_element_by_xpath('//*[@id="tipsButton"]')  # wait 3 seconds, then click
    # find the button that switches the login form to QR-code mode
    button = driver.find_element_by_xpath('/html/body/div[1]/div[1]/div/div[1]/a[1]')
    button.click()
    time.sleep(10)  # wait for the page to finish rendering the QR code

    # capture the Douban login QR code
    images = driver.find_elements_by_xpath('/html/body/div[1]/div[3]/div[1]/div[1]/img')
    for index, img in enumerate(images):
        src = img.get_attribute('src')
        urlretrieve(src, '{}.jpg'.format(index))  # save the QR code locally as 0.jpg, 1.jpg, ...

    cookies = driver.get_cookies()
    driver.quit()
    return cookies

if __name__ == '__main__':
    url = "https://www.douban.com/"
    cookie = get_cookie_QRCode(url)
    # cookie = get_cookie_DynamicCode(url)
    add_cookie(cookie)  # add_cookie is not defined in this post; see the sketch below
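The script ends with `add_cookie(cookie)`, which is not defined in this post; presumably it hands the cookies obtained from the QR-code login to later requests. A minimal sketch of one way it might work, assuming the cookies are to be reused with the `requests` library (the function body and the target URL here are illustrative, not the original author's code):

import requests

def add_cookie(cookies):
    # copy Selenium's cookie dicts into a requests session so that
    # subsequent scraping runs as the logged-in user
    session = requests.Session()
    for c in cookies:
        session.cookies.set(c['name'], c['value'],
                            domain=c.get('domain'), path=c.get('path', '/'))
    # any later request made through this session carries the login cookies
    resp = session.get('https://www.douban.com/mine/')
    print(resp.status_code)
    return session

The same list of cookie dicts could also be passed to a Scrapy `Request` via its `cookies` argument if the rest of the crawl is done in Scrapy.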
Original post: https://www.cnblogs.com/liuffblog/p/12713054.html