Python crawler: fetching Baidu images (nothing profound, just for fun)

Python 3.7, basic crawling: this script fetches image results from Baidu image search. msg is the search term and cnt is the number of result pages to crawl. Come and crawl along.
Note: for now this only grabs Baidu's small thumbnail images; I will post a method for the full-size images in a follow-up (a rough sketch appears at the end of this post).

#!/usr/bin/env python
# -*- coding:utf-8 -*-

# Crawler: download thumbnails from Baidu image search
import re
import urllib.request
from urllib.parse import quote

msg = quote("汽车")  # URL-encode the Chinese search term so it survives in the URL
cnt = 30             # number of result pages to crawl
user_agent = 'Mozilla/5.0 (Windows; U; Windows NT 6.1; en-US; rv:1.9.1.6) Gecko/20091201 Firefox/3.5.6'
x = 0                # running index used to name the saved files

for i in range(1, cnt):
    # pn is Baidu's paging parameter; word carries the encoded query
    url = ("https://image.baidu.com/search/index?tn=baiduimage&ipn=r&ct=201326592"
           "&cl=2&lm=-1&st=-1&fm=result&fr=&sf=1&pv=&ic=0&nc=1&z=&se=1&showtab=0"
           "&fb=0&width=&height=&face=0&istype=2&ie=utf-8&pn=" + str(i) + "&word=" + msg)
    req = urllib.request.Request(url, headers={'User-Agent': user_agent})
    response = urllib.request.urlopen(req)
    content = response.read().decode('utf-8')

    # Filter: pick out the thumbnail URLs embedded in the page
    reg = r'(https://ss0\.bdstatic\.com/.*?\.jpg)'
    imglist = re.findall(reg, content)
    print(imglist)

    temp = ''
    for imgurl in imglist:
        if temp != imgurl:  # note: this only skips *consecutive* duplicates
            print(imgurl)
            # the target folder D:\pythonImage must already exist
            urllib.request.urlretrieve(imgurl, r'D:\pythonImage\%s.jpg' % x)
            x += 1
        temp = imgurl
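Two caveats with the saving loop: the temp check only skips consecutive duplicates, and urlretrieve raises an error if D:\pythonImage does not exist. A minimal hardening sketch (the set-based deduplication and the directory creation are my additions, not part of the original post):

import os
import urllib.request

def save_unique(imglist, outdir=r'D:\pythonImage'):
    # Download each distinct URL exactly once, naming files 0.jpg, 1.jpg, ...
    os.makedirs(outdir, exist_ok=True)  # create the folder if it is missing
    seen = set()
    x = 0
    for imgurl in imglist:
        if imgurl in seen:
            continue  # a set catches repeats anywhere in the list, not just neighbours
        seen.add(imgurl)
        urllib.request.urlretrieve(imgurl, os.path.join(outdir, '%s.jpg' % x))
        x += 1
    return x  # number of images actually saved

Calling save_unique(imglist) in place of the inner for loop keeps the page-fetching logic above unchanged.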

Result: each page's list of matched URLs is printed, and the thumbnails land in D:\pythonImage as 0.jpg, 1.jpg, and so on.

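About the full-size images promised above: at the time of writing, Baidu's results page embedded a JSON-like blob in which "objURL" fields pointed at the original images (thumbURL held the thumbnails). A rough sketch under that assumption; the field name, the page layout, and any URL obfuscation Baidu applies may change at any time:

import re
import urllib.request
from urllib.parse import quote

url = "https://image.baidu.com/search/index?tn=baiduimage&ie=utf-8&word=" + quote("汽车")
req = urllib.request.Request(url, headers={'User-Agent': 'Mozilla/5.0'})
content = urllib.request.urlopen(req).read().decode('utf-8')

# "objURL" entries hold the original (large) image URLs, if present in the page;
# Baidu sometimes obfuscates them, in which case they need decoding before use.
biglist = re.findall(r'"objURL"\s*:\s*"(.*?)"', content)
print(biglist[:5])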

Original post: https://www.cnblogs.com/Monster-World/p/9546068.html