# Use a thread pool to scrape "life" (生活) videos from pearvideo.com

import requests
import os
from multiprocessing.dummy import Pool
from lxml import etree

# Principle: a thread pool is for blocking, time-consuming operations (I/O-bound work).

# Category page for pearvideo "life" videos.
url = 'https://www.pearvideo.com/category_5'

headers = {
    "User-Agent": 'Mozilla/5.0 (Windows NT 6.1; Win64; x64; rv:85.0) Gecko/20100101 Firefox/85.0'
}

# Make sure the output directory exists before any download starts.
if not os.path.exists('./images'):
    os.mkdir('./images')

# Fetch the category page and collect one {content-id, file-name} record
# per listed video.
page_text = requests.get(url=url, headers=headers).text
tree = etree.HTML(page_text)
li_list = tree.xpath('//ul[@id="listvideoListUl"]/li')
video_list1 = []
for li in li_list:
    each = li.xpath('./div/a/@href')[0]        # e.g. 'video_1720000'
    url_num = each.replace('video_', "")       # numeric content id
    name = li.xpath('./div/a/div[2]/text()')[0] + '.mp4'
    dic = {
        "url_num": url_num,
        "name": name
    }
    video_list1.append(dic)

print(video_list1)

# The real mp4 address must be obtained from the videoStatus API, which
# requires a Referer matching the video page (anti-hotlinking check).
target = "https://www.pearvideo.com/videoStatus.jsp?contId="
new_url_list = []
for dic_data in video_list1:
    new_url = target + dic_data['url_num']
    headers = {
        "User-Agent": 'Mozilla/5.0 (Windows NT 6.1; Win64; x64; rv:85.0) Gecko/20100101 Firefox/85.0',
        'Referer': 'https://www.pearvideo.com/video_' + dic_data['url_num']
    }
    url_data = requests.get(url=new_url, headers=headers).json()
    srcUrl = url_data['videoInfo']['videos']['srcUrl']
    # The API returns a decoy URL whose last path segment before the first '-'
    # is a timestamp; swapping it for 'cont-<id>' yields the downloadable URL.
    cont = 'cont-' + dic_data['url_num']
    new1_url = srcUrl.replace(srcUrl.split("-")[0].split("/")[-1], cont)
    dic1 = {
        "new_url": new1_url,
        "new_url_name": dic_data['name']
    }
    new_url_list.append(dic1)

print(new_url_list)

# Use the thread pool to request the video data (the slow, blocking step).

def get_video_data(dic):
    """Download one video (blocking) and save it under ./images/.

    dic: {"new_url": direct mp4 URL, "new_url_name": output file name}
    """
    url = dic["new_url"]
    print(dic['new_url_name'], '正在下载。。。')
    # NOTE(review): relies on the module-level `headers` (whatever Referer the
    # last loop iteration left in it); the mp4 CDN appears to accept this.
    video_data = requests.get(url=url, headers=headers).content
    # Fix: original wrote into the CWD even though ./images was created
    # specifically to hold the downloads.
    with open(os.path.join('./images', dic['new_url_name']), 'wb') as fp:
        fp.write(video_data)
    print(dic['new_url_name'], '下载成功!!')

# Fan the download jobs out over 4 worker threads.
# (multiprocessing.dummy.Pool is a thread pool — appropriate here because
# the work is I/O-bound and the GIL is released during blocking I/O.)
pool = Pool(4)
# map() blocks until every get_video_data call has returned.
pool.map(get_video_data, new_url_list)
pool.close()  # no more tasks will be submitted
pool.join()   # wait for all workers to finish

# "Life is short, I use Python."
# Original post: https://www.cnblogs.com/niucunguo/p/14438135.html