A Python crawler that downloads images from the 无聊图 ("boring pictures") board on jandan.net

#!/usr/bin/python
# encoding: utf-8
'''
@python 3.6.1
@author: 1109159477@qq.com
@create date: 20170611
'''
import requests
import urllib.request
import re
import os

# Find the current maximum page number of the picture board.
url = 'http://jandan.net/pic'
data = urllib.request.urlopen(url).read().decode('utf-8')
max_page_num = re.findall(r'<span class="current-comment-page">\[(.*?)\]</span>', data)[1]

def download_pic(start_page, stop_page, download_file):
    for num in range(start_page, stop_page):
        os.chdir(download_file)  # directory where the images are saved
        url = 'http://jandan.net/pic/page-%s' % num
        data = urllib.request.urlopen(url).read().decode('utf-8')
        pics = re.findall(r'<a href="//(.*?)" target.*?</a><br />', data, re.I | re.S | re.M)
        for i in pics:
            i = 'http://' + i
            r = requests.get(i)
            pic_name = i.split('/')[-1]  # use the last URL segment as the file name
            with open(pic_name, 'wb') as f:
                f.write(r.content)

if __name__ == '__main__':
    print('Current maximum page number: %s' % max_page_num)
    page1, page2 = input('Enter the page range to download, e.g. 500,513: ').split(',')
    download_pic(int(page1), int(page2), r'C:\Users\samsung\Desktop\jd_pics')
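If you do not want the interactive prompt, the download loop can also be called directly. The following is a minimal sketch of that idea (not from the original post): it uses requests for every HTTP call, skips pages that fail to load, and saves into a relative directory. The helper name download_range, the default save_dir value, and the example page range are placeholders of my own.

import os
import re
import requests

def download_range(start_page, stop_page, save_dir='jd_pics'):
    # Hypothetical helper: same idea as download_pic above, but non-interactive,
    # with a request timeout and basic error handling.
    os.makedirs(save_dir, exist_ok=True)
    for num in range(start_page, stop_page):
        try:
            html = requests.get('http://jandan.net/pic/page-%s' % num, timeout=10).text
        except requests.RequestException:
            continue  # skip pages that cannot be fetched
        for link in re.findall(r'<a href="//(.*?)" target.*?</a><br />', html, re.I | re.S | re.M):
            pic_url = 'http://' + link
            name = pic_url.split('/')[-1]  # file name taken from the URL
            resp = requests.get(pic_url, timeout=10)
            with open(os.path.join(save_dir, name), 'wb') as f:
                f.write(resp.content)

# example call: download pages 500 through 512
# download_range(500, 513)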
Original article: https://www.cnblogs.com/stay-hungry/p/6983341.html