Introduction to Python Web Scraping - 3


The Yundama CAPTCHA-solving platform

  • Registration: regular-user and developer accounts
  • Login:
    • Log in as a regular user (to check your balance)
    • Log in as a developer:
      • Create a piece of software: My Software -> Create Software
      • Download the sample code: Developer Center -> Download the latest DLL -> download the PythonHTTP sample code
import http.client, mimetypes, urllib, json, time, requests

######################################################################

class YDMHttp:

    apiurl = 'http://api.yundama.com/api.php'
    username = ''
    password = ''
    appid = ''
    appkey = ''

    def __init__(self, username, password, appid, appkey):
        self.username = username  
        self.password = password
        self.appid = str(appid)
        self.appkey = appkey

    def request(self, fields, files=[]):
        response = self.post_url(self.apiurl, fields, files)
        response = json.loads(response)
        return response
    
    def balance(self):
        data = {'method': 'balance', 'username': self.username, 'password': self.password, 'appid': self.appid, 'appkey': self.appkey}
        response = self.request(data)
        if (response):
            if (response['ret'] and response['ret'] < 0):
                return response['ret']
            else:
                return response['balance']
        else:
            return -9001
    
    def login(self):
        data = {'method': 'login', 'username': self.username, 'password': self.password, 'appid': self.appid, 'appkey': self.appkey}
        response = self.request(data)
        if (response):
            if (response['ret'] and response['ret'] < 0):
                return response['ret']
            else:
                return response['uid']
        else:
            return -9001

    def upload(self, filename, codetype, timeout):
        data = {'method': 'upload', 'username': self.username, 'password': self.password, 'appid': self.appid, 'appkey': self.appkey, 'codetype': str(codetype), 'timeout': str(timeout)}
        file = {'file': filename}
        response = self.request(data, file)
        if (response):
            if (response['ret'] and response['ret'] < 0):
                return response['ret']
            else:
                return response['cid']
        else:
            return -9001

    def result(self, cid):
        data = {'method': 'result', 'username': self.username, 'password': self.password, 'appid': self.appid, 'appkey': self.appkey, 'cid': str(cid)}
        response = self.request(data)
        return response and response['text'] or ''

    def decode(self, filename, codetype, timeout):
        cid = self.upload(filename, codetype, timeout)
        if (cid > 0):
            for i in range(0, timeout):
                result = self.result(cid)
                if (result != ''):
                    return cid, result
                else:
                    time.sleep(1)
            return -3003, ''
        else:
            return cid, ''

    def report(self, cid):
        data = {'method': 'report', 'username': self.username, 'password': self.password, 'appid': self.appid, 'appkey': self.appkey, 'cid': str(cid), 'flag': '0'}
        response = self.request(data)
        if (response):
            return response['ret']
        else:
            return -9001

    def post_url(self, url, fields, files=[]):
        for key in files:
            files[key] = open(files[key], 'rb')
        res = requests.post(url, files=files, data=fields)
        return res.text

######################################################################

# Username (regular user)
username    = 'bobo328410948'

# Password
password    = 'bobo328410948'

# Software ID, a required parameter for developer revenue sharing. Get it from "My Software" in the developer console.
appid       = 6003

# Software key, a required parameter for developer revenue sharing. Get it from "My Software" in the developer console.
appkey      = '1f4b564483ae5c907a1d34f8e2f2776c'

# Image file
filename    = 'getimage.jpg'

# CAPTCHA type. E.g. 1004 means 4 alphanumeric characters. Pricing differs by type; fill it in accurately or recognition accuracy suffers. All types are listed at http://www.yundama.com/price.html
codetype    = 1004

# Timeout, in seconds
timeout     = 10

# Sanity check
if (username == 'username'):
    print('Please set the parameters above before testing')
else:
    # Initialize
    yundama = YDMHttp(username, password, appid, appkey)

    # Log in to Yundama
    uid = yundama.login()
    print('uid: %s' % uid)

    # Query the balance
    balance = yundama.balance()
    print('balance: %s' % balance)

    # Start recognition: image path, CAPTCHA type ID, timeout (seconds); returns the cid and the recognized text
    cid, result = yundama.decode(filename, codetype, timeout)
    print('cid: %s, result: %s' % (cid, result))

######################################################################
def getCodeDate(userName,pwd,codePath,codeType):
    # Username (regular user)
    username    = userName

    # Password
    password    = pwd

    # Software ID, a required parameter for developer revenue sharing. Get it from "My Software" in the developer console.
    appid       = 6003

    # Software key, a required parameter for developer revenue sharing. Get it from "My Software" in the developer console.
    appkey      = '1f4b564483ae5c907a1d34f8e2f2776c'

    # Image file
    filename    = codePath

    # CAPTCHA type. E.g. 1004 means 4 alphanumeric characters. All types are listed at http://www.yundama.com/price.html
    codetype    = codeType

    # Timeout, in seconds
    timeout     = 2
    result = None
    # Sanity check
    if (username == 'username'):
        print('Please set the parameters above before testing')
    else:
        # Initialize
        yundama = YDMHttp(username, password, appid, appkey)

        # Log in to Yundama
        uid = yundama.login()
        #print('uid: %s' % uid)

        # Query the balance
        balance = yundama.balance()
        #print('balance: %s' % balance)

        # Start recognition: image path, CAPTCHA type ID, timeout (seconds); returns the cid and the recognized text
        cid, result = yundama.decode(filename, codetype, timeout)
        #print('cid: %s, result: %s' % (cid, result))
    return result
# Simulated login to renren.com
import requests
import urllib.request
from lxml import etree
# Create a session object
session = requests.Session()
# Download the CAPTCHA image
headers = {
    'User-Agent':'Mozilla/5.0 (Windows NT 6.1; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/72.0.3626.119 Safari/537.36'
}
url = 'http://www.renren.com/'
page_text = requests.get(url=url,headers=headers).text

tree = etree.HTML(page_text)
code_img_url = tree.xpath('//*[@id="verifyPic_login"]/@src')[0]
urllib.request.urlretrieve(url=code_img_url,filename='code.jpg')

# Recognize the characters in the CAPTCHA image
code_data = getCodeDate('bobo328410948','bobo328410948','./code.jpg',2004)

# Simulated login
login_url = 'http://www.renren.com/ajaxLogin/login?1=1&uniqueTimestamp=201914927558'
data = {
    "email":"www.zhangbowudi@qq.com",
    "icode":code_data,
    "origURL":"http://www.renren.com/home",
    "domain":"renren.com",
    "key_id":"1",
    "captcha_type":"web_login",
    "password":"4f0350f09aeffeef86307747218b214b0960bdf35e30811c0d611fe39db96ec1",
    "rkey":"9e75e8dc3457b14c55a74627fa64fb43",
    "f":"http%3A%2F%2Fwww.renren.com%2F289676607",
}
# Cookies produced by this request are stored into the session object automatically
session.post(url=login_url,data=data,headers=headers)

url = 'http://www.renren.com/289676607/profile'
page_text = session.get(url=url,headers=headers).text

with open('renren.html','w',encoding='utf-8') as fp:
    fp.write(page_text)
from fake_useragent import UserAgent
ua = UserAgent(verify_ssl=False,use_cache_server=False).random
print(ua)
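
The random UA string can be dropped straight into a requests header, so each run presents a different browser signature. A minimal sketch (the target URL here is just a placeholder):

import requests
from fake_useragent import UserAgent

# Hypothetical target; any page works for the demonstration
headers = {'User-Agent': UserAgent(verify_ssl=False, use_cache_server=False).random}
page_text = requests.get(url='http://www.baidu.com', headers=headers).text
print(page_text[:200])  # first 200 characters, just to confirm the request worked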

import requests
import urllib
from lxml import etree
headers = {
    'User-Agent':'Mozilla/5.0 (Windows NT 6.1; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/72.0.3626.119 Safari/537.36'
}

# Simulated login to gushiwen.org
s = requests.Session()
login_url = 'https://so.gushiwen.org/user/login.aspx?from=http://so.gushiwen.org/user/collect.aspx'
page_text = requests.get(url=login_url,headers=headers).text
tree = etree.HTML(page_text)
img_src = 'https://so.gushiwen.org'+tree.xpath('//*[@id="imgCode"]/@src')[0]
img_data = s.get(url=img_src,headers=headers).content
with open('./img.jpg','wb') as fp:
    fp.write(img_data)
img_text = getCodeDate('bobo328410948','bobo328410948','./img.jpg',1004)

# Simulated login
url = 'https://so.gushiwen.org/user/login.aspx?from=http%3a%2f%2fso.gushiwen.org%2fuser%2fcollect.aspx'
data = {
    "__VIEWSTATE":"9AsGvh3Je/0pfxId7DYRUi258ayuEG4rrQ1Z3abBgLoDSOeAUatOZOrAIxudqiOauXpR9Zq+dmKJ28+AGjXYHaCZJTTtGgrEemBWI1ed7oS7kpB7Rm/4yma/+9Q=",
    "__VIEWSTATEGENERATOR":"C93BE1AE",
    "from":"http://so.gushiwen.org/user/collect.aspx",
    "email":"www.zhangbowudi@qq.com",
    "pwd":"bobo328410948",
    "code":img_text,
    "denglu":"登录",
}
page_text = s.post(url=url,headers=headers,data=data).text
with open('./gushiwen.html','w',encoding='utf-8') as fp:
    fp.write(page_text)

Detailed usage of selenium

1 Keyboard actions

from selenium.webdriver.common.keys import Keys
from selenium import webdriver
import time

driver = webdriver.Chrome()
driver.get('http://www.baidu.com')

time.sleep(3)
driver.find_element_by_id('kw').send_keys(u'我爱你')

time.sleep(3)
# Ctrl+A: select all
driver.find_element_by_id('kw').send_keys(Keys.CONTROL, 'a')
time.sleep(3)
# Ctrl+X: cut
driver.find_element_by_id('kw').send_keys(Keys.CONTROL, 'x')

time.sleep(3)
driver.find_element_by_id('kw').send_keys(u'我爱爱你')
driver.find_element_by_id('su').click()

time.sleep(3)
# Quit the browser
driver.quit()

2 Waits

selenium drives a real browser, and slowness is one of its defining traits. The code often finishes executing before the page content has loaded and its elements are rendered; operating on such an element at that point raises a NoSuchElementException.
Workaround: sleep. Whether or not the page content has finished loading, always sleep for the specified number of seconds before touching the element.

from selenium.webdriver.support.ui import WebDriverWait
from selenium import webdriver

driver = webdriver.Chrome()
driver.get('http://www.baidu.com')
driver.find_element_by_id('kw').send_keys(u'我爱你')
button = driver.find_element_by_id('su')

# WebDriverWait: wait on the page
#   arg 1: the object to wait on (usually the driver; passing the element works
#          here only because the lambda below ignores its argument)
#   arg 2: the maximum time to wait
# WebDriverWait is usually combined with until and until_not
# Wait until the target element is displayed
is_visible = WebDriverWait(button, 10).until(lambda x: button.is_displayed())
print(is_visible)
# Prints True
button.click()

How WebDriverWait and time.sleep() compare:
1 Both make the program wait for a specified amount of time.
2 time.sleep() always waits the full, fixed duration, no matter how quickly the element loads; WebDriverWait's wait is variable, returning as soon as the element is ready, up to the specified maximum.
3 If the element still has not loaded within the specified time, both approaches end in an exception: WebDriverWait raises a TimeoutException, while with time.sleep() the subsequent element lookup raises a NoSuchElementException. The more idiomatic explicit-wait pattern is sketched below.
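
A minimal sketch of the explicit-wait idiom using the expected_conditions helpers that ship with selenium (this post uses plain lambdas instead; both work):

from selenium import webdriver
from selenium.webdriver.common.by import By
from selenium.webdriver.support.ui import WebDriverWait
from selenium.webdriver.support import expected_conditions as EC

driver = webdriver.Chrome()
driver.get('http://www.baidu.com')
# Block for at most 10 seconds until the search button is present;
# until() returns the located element directly
button = WebDriverWait(driver, 10).until(
    EC.presence_of_element_located((By.ID, 'su'))
)
button.click()
driver.quit()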

3 Click events

from selenium import webdriver
from selenium.webdriver.common.action_chains import ActionChains
from selenium.webdriver.support.ui import WebDriverWait

driver = webdriver.Chrome()
driver.get('http://www.baidu.com')

# All three locate the same element
logo = driver.find_element_by_xpath('//div[@id="lg"]/img')
logo2 = driver.find_element_by_class_name('index-logo-src')
logo3 = driver.find_element_by_css_selector('#lg > img')

WebDriverWait(driver, 10).until(lambda x: logo.is_displayed())
# ActionChains must be instantiated with the driver
ActionChains(driver).double_click(logo).perform()


# context_click means right-click
action = ActionChains(driver).context_click(logo)
# Queued actions are executed when perform() is called
action.perform()

# Mouse movement
more = driver.find_element_by_class_name('bri')
WebDriverWait(driver, 10).until(lambda x: more.is_displayed())
ActionChains(driver).move_to_element(more).perform()

4 Element selection

This is the page to be parsed:

<!DOCTYPE html>
<html lang="en">
<head>
    <meta charset="UTF-8">
    <title>Title</title>
</head>
<body>
    <button id="red" class="red" onclick="fun1()">按钮1</button>
    <button type="button" name="username" onclick="fun2()">按钮2</button>
    <button type="button" onclick="fun3()">按钮3</button>
    <button id="yellow" onclick="fun4()">按钮4</button>
    <script>
        function fun1(){
            document.body.style.backgroundColor = 'black'
        }
        function fun2(){
            document.body.style.backgroundColor = 'purple'
        }
        function fun3(){
            document.body.style.backgroundColor = 'pink'
        }
        function fun4(){
            document.body.style.backgroundColor = 'yellow'
        }
    </script>
</body>
</html>

Specific usage of element selection:

from selenium import webdriver
from selenium.webdriver.common.by import By
import os

driver = webdriver.Chrome()
driver.get('file:///' + os.path.abspath('4 index.html'))

# Find elements by tag name
# Note: elements, plural
btns = driver.find_elements_by_tag_name('button')
btns[1].click()


# 1 Find a specific element by index
for btn in btns:
    btn.click()
    # 2 Find a specific element by an attribute value
    if btn.get_attribute('name') == 'username':
        btn.click()


# find_element_by_XXX  finds the first element matching XXX
# find_elements_by_XXX finds all elements matching XXX
# The line below finds the first element whose tag name is button
btn = driver.find_element_by_tag_name('button').click()

# pop() removes and returns the element at the given index; with no index it defaults to the last one
driver.find_elements_by_css_selector('button').pop(1).click()

# [type=button]: the brackets hold a condition that restricts the selection

# find_elements returns a list; [0] is the first of all elements matching type=button
driver.find_elements_by_css_selector('button[type=button]')[0].click()

# find_element(by=..., value=...) is the generic locator that the helpers above are shorthands for
driver.find_element(by=By.ID, value='yellow').click()
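
Note that Selenium 4 removed the find_element_by_XXX helpers in favor of By locators; a minimal sketch of the same lookups in the newer API, assuming the test page above:

import os
from selenium import webdriver
from selenium.webdriver.common.by import By

driver = webdriver.Chrome()
driver.get('file:///' + os.path.abspath('4 index.html'))

# Each find_element_by_XXX call becomes find_element(By.XXX, ...)
driver.find_element(By.TAG_NAME, 'button').click()
driver.find_elements(By.CSS_SELECTOR, 'button[type=button]')[0].click()
driver.find_element(By.ID, 'yellow').click()
driver.quit()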

5 Window switching

from selenium import webdriver
from selenium.webdriver.support.ui import WebDriverWait
import time

driver = webdriver.Chrome()
driver.get('http://www.baidu.com')

# Get the current window handle
current_window = driver.current_window_handle
print(current_window, driver.title)
# CDwindow-E5F3FC897FF4B4F7EA29CE1D42CCF738 百度一下,你就知道

time.sleep(3)
driver.find_element_by_name('tj_trnews').click()

news = WebDriverWait(driver, 10).until(lambda x: driver.find_element_by_css_selector('.hdline0 .a3'))
news.click()

all_windows = driver.window_handles

for window in all_windows:
    if window != current_window:
        time.sleep(4)
        driver.switch_to.window(window)
        # Get the h1 headline on Baidu News
        title = driver.find_element_by_xpath('//div[@class="cnt_bd"]/h1')
        WebDriverWait(title, 10).until(lambda x: title.is_displayed())
        print(title.text)

# Close the browser
# driver.quit()

driver.switch_to.window(current_window)
print(driver.find_element_by_css_selector('#footer span').text)

6 A simple Taobao scrape with selenium

from selenium import webdriver
from selenium.webdriver.support.ui import WebDriverWait
from selenium.webdriver.common.action_chains import ActionChains
import time

driver = webdriver.Chrome()
driver.get('http://www.taobao.com')

driver.find_element_by_id('q').send_keys(u'辣条')
time.sleep(3)

# Four ways to locate the search button:

# The class attribute holds several space-separated names; this takes only the last one
# driver.find_element_by_css_selector('.J_SearchPanel .tb-bg').click()

# This takes only the first name before the space; it also works
# driver.find_element_by_css_selector('.search-panel .btn-search').click()

# Locating by an attribute value also works
# driver.find_element_by_css_selector('button[data-spm-click="gostr=/tbindex;locaid=d13"]').click()

# Clicking the button located via XPath also works
driver.find_element_by_xpath('//div[@class="search-button"]/button').click()

for page in range(1, 3):
    print('Scraping page {}'.format(page))
    # Scraping the desktop site: one page holds 13 rows, and the screen shows at most two rows
    for row in range(1, 13, 2):
        # x divides the page into 12 equal parts
        x = float(row) / 12
        # document: the page
        # documentElement: the root element of the page
        # scrollTop: how far to scroll down from the top
        # scrollHeight: the total scrollable height of the page
        # Python cannot execute JS directly, so each scroll step is
        # computed as a JS snippet written as a string
        js = 'document.documentElement.scrollTop=document.documentElement.scrollHeight * {}'.format(x)
        driver.execute_script(js)
        time.sleep(3)
    item_list = driver.find_elements_by_class_name('J_MouserOnverReq')
    for item in item_list:
        with open('lianxi.txt', 'a', encoding='utf8') as f:
            f.write(item.text)
            f.write('\n')
    # Jump to the next page
    driver.find_element_by_xpath('//li[@class="item next"]/a').click()
  • Environment setup: pip install selenium
  • Coding flow:
    • Import the package: from selenium import webdriver
    • Instantiate a browser object
    • Write the automation code for the desired actions
from selenium import webdriver
from time import sleep

bro = webdriver.Chrome(executable_path=r'C:\Users\Administrator\Desktop\爬虫+数据\day_03_爬虫\chromedriver.exe')
bro.get(url='https://www.baidu.com/')
sleep(2)
text_input = bro.find_element_by_id('kw')

text_input.send_keys('人民币')
sleep(2)
bro.find_element_by_id('su').click()

sleep(3)

# Get the current page source
print(bro.page_source)
bro.quit()
# Get more movie detail data from Douban Movies
url = 'https://movie.douban.com/typerank?type_name=%E6%83%8A%E6%82%9A&type=19&interval_id=100:90&action='
bro = webdriver.Chrome(executable_path=r'C:\Users\Administrator\Desktop\爬虫+数据\day_03_爬虫\chromedriver.exe')
bro.get(url)
sleep(3)
bro.execute_script('window.scrollTo(0,document.body.scrollHeight)')
sleep(3)
bro.execute_script('window.scrollTo(0,document.body.scrollHeight)')
sleep(3)
bro.execute_script('window.scrollTo(0,document.body.scrollHeight)')
sleep(2)
page_text = bro.page_source

with open('./douban.html','w',encoding='utf-8') as fp:
    fp.write(page_text)

sleep(1)
bro.quit()
# Headless Chrome
from selenium.webdriver.chrome.options import Options
chrome_options = Options()
chrome_options.add_argument('--headless')
chrome_options.add_argument('--disable-gpu')

# Get more movie detail data from Douban Movies
url = 'https://movie.douban.com/typerank?type_name=%E6%83%8A%E6%82%9A&type=19&interval_id=100:90&action='
bro = webdriver.Chrome(executable_path=r'C:\Users\Administrator\Desktop\爬虫+数据\day_03_爬虫\chromedriver.exe',chrome_options=chrome_options)
bro.get(url)
sleep(3)
bro.execute_script('window.scrollTo(0,document.body.scrollHeight)')
sleep(3)
bro.execute_script('window.scrollTo(0,document.body.scrollHeight)')
sleep(3)
bro.execute_script('window.scrollTo(0,document.body.scrollHeight)')
sleep(2)
page_text = bro.page_source

with open('./douban.html','w',encoding='utf-8') as fp:
    fp.write(page_text)
print(page_text)
sleep(1)
bro.quit()
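
For newer Selenium releases the headless setup changed shape: the chrome_options keyword became options, and since 4.6 Selenium Manager resolves the driver binary itself, so executable_path can be dropped. A sketch of the same headless fetch under Selenium 4, under those assumptions:

from selenium import webdriver
from selenium.webdriver.chrome.options import Options

chrome_options = Options()
chrome_options.add_argument('--headless')
chrome_options.add_argument('--disable-gpu')
# Selenium 4: pass options=...; no executable_path needed
bro = webdriver.Chrome(options=chrome_options)
bro.get('https://movie.douban.com/')
print(bro.title)
bro.quit()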
# PhantomJS
# Get more movie detail data from Douban Movies
url = 'https://movie.douban.com/typerank?type_name=%E6%83%8A%E6%82%9A&type=19&interval_id=100:90&action='
bro = webdriver.PhantomJS(executable_path=r'C:\Users\Administrator\Desktop\爬虫+数据\day_03_爬虫\phantomjs-2.1.1-windows\bin\phantomjs.exe')
bro.get(url)
sleep(3)
bro.execute_script('window.scrollTo(0,document.body.scrollHeight)')
sleep(3)
bro.execute_script('window.scrollTo(0,document.body.scrollHeight)')
sleep(3)
bro.execute_script('window.scrollTo(0,document.body.scrollHeight)')
sleep(2)
page_text = bro.page_source

with open('./douban.html','w',encoding='utf-8') as fp:
    fp.write(page_text)

sleep(1)
bro.quit()
# QQ Zone (Qzone)
bro = webdriver.Chrome(executable_path=r'C:\Users\Administrator\Desktop\爬虫+数据\day_03_爬虫\chromedriver.exe')
url = 'https://qzone.qq.com/'
bro.get(url=url)
sleep(2)
# Switch into a specific iframe
bro.switch_to.frame('login_frame')
bro.find_element_by_id('switcher_plogin').click()
sleep(2)

bro.find_element_by_id('u').send_keys('460086804')
bro.find_element_by_id('p').send_keys('shuo0127')

bro.find_element_by_id('login_button').click()

sleep(5)

page_text = bro.page_source
with open('qq.html','w',encoding='utf-8') as fp:
    fp.write(page_text)
bro.quit()
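
Once switched into an iframe, locators resolve only inside that frame's document; switching back out is easy to forget. A minimal sketch of the pattern (login_frame is the frame name used above):

bro.switch_to.frame('login_frame')          # locators now see only the iframe's document
bro.find_element_by_id('switcher_plogin').click()
bro.switch_to.default_content()             # back to the top-level document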
# Scrape video data from pearvideo.com
import requests
import re
from lxml import etree
from multiprocessing.dummy import Pool
import random

headers = {
    'User-Agent':'Mozilla/5.0 (Windows NT 6.1; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/72.0.3626.119 Safari/537.36'
}

# The worker functions must be defined before pool.map uses them
def getVideoData(url):
    return requests.get(url=url,headers=headers).content

def saveVideo(data):
    # Save under a random file name
    fileName = str(random.randint(0,5000))+'.mp4'
    with open(fileName,'wb') as fp:
        fp.write(data)

# Instantiate a thread pool object
pool = Pool(5)
url = 'https://www.pearvideo.com/category_1'
page_text = requests.get(url=url,headers=headers).text
tree = etree.HTML(page_text)
li_list = tree.xpath('//div[@id="listvideoList"]/ul/li')

video_url_list = []
for li in li_list:
    detail_url = 'https://www.pearvideo.com/'+li.xpath('./div/a/@href')[0]
    detail_page = requests.get(url=detail_url,headers=headers).text
    # The video URL is embedded in the page's JS, so pull it out with a regex
    video_url = re.findall('srcUrl="(.*?)",vdoUrl',detail_page,re.S)[0]
    video_url_list.append(video_url)

# Download all videos concurrently, then save them
video_data_list = pool.map(getVideoData,video_url_list)

pool.map(saveVideo,video_data_list)
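
One housekeeping step the script leaves out: releasing the pool's worker threads once both map calls have returned. A two-line addition, appended at the end:

pool.close()  # stop accepting new tasks
pool.join()   # wait for the workers to finish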


Original article: https://www.cnblogs.com/songhuasheng/p/10451655.html