# Python spider: scrape "street style" (街拍) photo galleries from Toutiao

"""
抓取今日头条街拍美图
"""

import os
import time
import requests

from hashlib import md5


class SpiderToutiao(object):
    """Spider that downloads "street style" (街拍) photo galleries from
    Toutiao search results into a local ``download`` directory.
    """

    def __init__(self):
        # Images are stored under a "download" directory next to this script.
        self.download_dir = os.path.join(os.path.dirname(os.path.abspath(__file__)), "download")
        # Ajax search endpoint discovered by inspecting network traffic;
        # pagination is driven by incrementing `offset` in steps of 20.
        # NOTE: the parentheses are required for implicit string
        # concatenation — the original code omitted them (SyntaxError).
        self.url = ("https://www.toutiao.com/search_content/"
                    "?offset={0}&format=json&keyword=%E8%A1%97%E6%8B%8D&autoload=true&count=20&cur_tab=3&from=gallery")
        # Request headers that make the request look like the site's own Ajax call.
        self.headers = {
            "User-Agent": "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) "
                          "Chrome/66.0.3359.139 Safari/537.36",
            "Referer": "https://www.toutiao.com/search/?keyword=%E8%A1%97%E6%8B%8D",
            "X-Requested-With": "XMLHttpRequest"
        }

    def handler(self, offset=0):
        """Fetch result pages starting at ``offset`` and download every gallery.

        :param offset: starting pagination offset (the API pages in steps of 20)
        :raises SystemExit: on a non-200 HTTP response from the search endpoint
        """
        while True:
            response = requests.get(self.url.format(offset), headers=self.headers)
            if response.status_code != 200:
                print(response.reason)
                # Preserve the original non-zero exit status, but raise
                # SystemExit directly instead of relying on the interactive
                # `exit()` helper injected by the `site` module.
                raise SystemExit(999)

            print("INFO -> Current URL: <%s>" % response.url)
            json_data = response.json().get("data")
            # An empty or missing "data" list means we've paged past the end.
            if not json_data:
                break

            for item in json_data:
                _title = item.get("title")
                _image_list = item.get("image_list")
                # Skip malformed entries lacking a title or images — the
                # original code crashed on these (os.path.join(None) /
                # iterating None).
                if not _title or not _image_list:
                    continue

                # The API returns thumbnail URLs; swapping "list" for
                # "large" in the path yields the full-resolution image.
                _img_urls = [("http:" + img["url"]).replace("list", "large") for img in _image_list]

                # One sub-directory per gallery, named after its title.
                _download_dir = os.path.join(self.download_dir, _title)
                os.makedirs(_download_dir, exist_ok=True)

                # Download each image; the MD5 of the content doubles as a
                # de-duplicating file name.
                for img_url in _img_urls:
                    r = requests.get(img_url)
                    _file = os.path.join(_download_dir, md5(r.content).hexdigest() + ".jpg")
                    if os.path.exists(_file):
                        # Fixed garbled log message ("ig" -> "ignore existing").
                        print("INFO -> ignore existing <%s>" % _file)
                    else:
                        with open(_file, "wb") as f:
                            f.write(r.content)

            # Advance to the next page and pause briefly to be polite.
            offset += 20
            time.sleep(.9)


if __name__ == "__main__":
    # Entry point: crawl from the first results page onwards.
    SpiderToutiao().handler()
# Original article: https://www.cnblogs.com/wangxiaoqiangs/p/8999738.html