requests-异步-梨视频下载

requests库

爬虫的本质就是模仿浏览器请求数据,所以requests帮我做的也就是这个事情

本质就是封装了urllib3

Get-Post

get-post主要的区别就是携带参数的方式不同

get

headers={}
url=''
params={}
cookies={}

requests.get(url=url,headers=headers,params=params,cookies=cookies)

post

headers={}
url=''
data={}
cookies={}

requests.post(url=url,headers=headers,data=data,cookies=cookies)

响应Response

response.text	获取响应文本
response.content	获取网页上的二进制图片、视频
response.encoding	获取网页编码
response.encoding="utf-8"	设置网页编码
response.status_code	获取响应状态码
response.headers	获取响应头信息
response.cookies	获取cookies信息
response.url	获取url信息
response.history	获取history信息
response.json()    获取json数据

梨视频首页视频

import os
import re
import time
import uuid
from concurrent.futures import ThreadPoolExecutor

import requests

url = 'https://www.pearvideo.com'

headers = {
    "User-Agent": "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/78.0.3904.108 Safari/537.36"
}

# Fetch the homepage HTML once.
page = requests.get(url=url, headers=headers).text

# Raw href candidates: every <a href="..." class...> on the page.
video1 = re.findall('<a href="(.*?)" class', page, re.S)

# Keep only hrefs that point at a video detail page and turn each
# one into an absolute URL.
video2 = [
    'https://www.pearvideo.com/' + href
    for href in video1
    if href.startswith('video')
]


# Resolve a video detail-page URL to its direct media URL.
def get_video(v_url):
    """Fetch the video's player page and extract the real source URL.

    The media URL is injected by JavaScript, so it is pulled out of
    the raw HTML with a regex rather than an HTML parser.
    """
    html = requests.get(url=v_url, headers=headers).text
    return re.findall('srcUrl="(.*?)",vdo', html)[0]

#保存视频
def save_video(res):
    """Thread-pool completion callback: download one resolved video.

    res -- a concurrent.futures.Future whose result() is the direct
           media URL produced by get_video.

    Writes the file under video/ with a random UUID name.
    """
    video_src = res.result()
    print(f"{video_src}开始下载")
    # The original crashed with FileNotFoundError when video/ was
    # missing (the second script guards this; this one didn't).
    os.makedirs('video', exist_ok=True)
    # stream=True + a real chunk size: without it, .iter_content()
    # first buffers the entire video in memory and then yields it
    # ONE BYTE at a time.
    response = requests.get(video_src, stream=True)
    with open(f'video/{uuid.uuid4()}.mp4', 'wb') as fw:
        for chunk in response.iter_content(chunk_size=8192):
            if chunk:
                fw.write(chunk)
    print(f"{video_src}下载成功")

#创建线程池
# Up to 20 concurrent resolve-and-download jobs.
pool = ThreadPoolExecutor(20)

#下载
# Submit one job per video page; save_video runs as a completion
# callback on each future.
for v_url in video2:
    time.sleep(0.2)  # light throttle so we don't hammer the server
    #获取视频链接,通过回调函数下载
    pool.submit(get_video, v_url).add_done_callback(save_video)

# The original never shut the pool down; wait for all queued work to
# finish and release the worker threads before the script exits.
pool.shutdown(wait=True)

2

import requests
import re
from lxml import etree
from multiprocessing.dummy import Pool
import os

# Make sure the output directory exists before downloading anything.
if not os.path.exists("video"):
    os.mkdir("video")

headers = {
    "User-Agent": "Mozilla/5.0 (Windows NT 10.0; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/69.0.3497.100 Safari/537.36"
}

url = 'https://www.pearvideo.com'

# Collected {"name": ..., "url": ...} records consumed by dow_video.
video_all = []

homepage = requests.get(url=url, headers=headers).text
dom = etree.HTML(homepage)

for item in dom.xpath('//ul[@id="actRecommendCont"]/li/div'):
    # Video title -> local file name.
    file_name = item.xpath('./a/h3/text()')[0] + '.mp4'
    # Absolute URL of the video's player page.
    play_page = "https://www.pearvideo.com/" + item.xpath('./a/@href')[0]
    detail_html = requests.get(url=play_page, headers=headers).text
    # The media URL is injected by JavaScript, so regex is the only
    # way to pull it out of the raw HTML.
    media_url = re.findall('srcUrl="(.*?)",vdo', detail_html)[0]
    video_all.append({"name": file_name, "url": media_url})

#下载方法
def dow_video(vie_dic):
    """Download one video record into video/.

    vie_dic -- dict with keys "name" (target file name) and "url"
               (direct media URL), as built by the scrape loop.

    Streams the response in chunks: the original fetched the whole
    video into memory via .content before writing it out.
    """
    print(vie_dic['name'] + "下载中...")
    response = requests.get(url=vie_dic['url'], headers=headers, stream=True)
    with open("video/" + vie_dic['name'], 'wb') as fw:
        for chunk in response.iter_content(chunk_size=8192):
            if chunk:
                fw.write(chunk)
    print(vie_dic['name'] + "下载成功!")

#开启8个线程
# multiprocessing.dummy.Pool is a thread pool with the Pool API.
thread_pool = Pool(8)

#多线程执行
# map blocks until every download task has been dispatched and run.
thread_pool.map(dow_video, video_all)

# Stop accepting new work, then wait for the worker threads to exit.
# (close + join kept explicit: Pool's context manager would call
# terminate() instead.)
thread_pool.close()
thread_pool.join()


参考链接

https://www.cnblogs.com/xiaoyuanqujing/articles/11805698.html

http://docs.python-requests.org/en/master/

原文地址:https://www.cnblogs.com/zx125/p/11421329.html