Python 爬虫_Request+正则表达式爬取猫眼电影Top100

流程框架:
1. 抓取单页网页内容
2. 正则表达式分析
3. 开启循环及多线程
4. 保存至文件

代码如下: 

import json
import re
from multiprocessing import Pool

import requests
from requests.exceptions import RequestException


def get_one_page(url):
    """Fetch one page and return its HTML text, or None on any failure.

    Returns None both for non-200 responses and for network errors
    (timeouts, DNS failures, ...), so callers only need one check.
    """
    try:
        # A timeout is essential: without it a stalled connection hangs forever.
        response = requests.get(url, timeout=10)
        if response.status_code == 200:
            return response.text
        return None
    except RequestException:
        return None


def parse_one_page(html):
    """Parse one board page; yield one dict per movie.

    Each dict has keys: index, image, title, actor, time, score.
    The pattern relies on re.S so '.' also matches newlines, because the
    interesting fields are spread across several lines of markup.
    """
    # Adjacent string literals concatenate; raw strings keep the regex readable.
    pattern = re.compile(
        r'<dd>.*?board-index.*?>(\d+)</i>.*?data-src="(.*?)".*?name"><a'
        r'.*?>(.*?)</a>.*?star">(.*?)</p>.*?releasetime">(.*?)</p>'
        r'.*?integer">(.*?)</i>.*?fraction">(.*?)</i>.*?</dd>', re.S)
    items = re.findall(pattern, html)
    for item in items:
        # Generator of formatted dicts, one per movie.
        yield {
            'index': item[0],
            'image': item[1],
            'title': item[2],
            'actor': item[3].strip()[3:],  # strip whitespace, drop the "主演:" prefix (3 chars)
            'time': item[4].strip()[5:],   # drop the "上映时间:" prefix (5 chars)
            'score': item[5] + item[6]     # integer part + fractional part, e.g. "9." + "6"
        }

def write_to_file(content):
    """Append one record to result.txt as a JSON line (UTF-8, CJK unescaped)."""
    with open('result.txt', 'a', encoding='utf-8') as f:
        # ensure_ascii=False keeps Chinese characters readable in the file.
        # The with-statement closes the file; no explicit close() needed.
        f.write(json.dumps(content, ensure_ascii=False) + '\n')

def main(offset):
    """Crawl one page of the Top100 board.

    offset is the page offset passed to the site (0, 10, ..., 90).
    Each parsed movie is printed and appended to result.txt.
    """
    url = 'http://maoyan.com/board/4?offset=' + str(offset)
    html = get_one_page(url)
    if html is None:
        # Network failure or non-200 status: skip this page gracefully
        # instead of crashing in the parser with html=None.
        return
    for item in parse_one_page(html):
        print(item)
        write_to_file(item)  # step 4 of the pipeline: persist to file



if __name__ == '__main__':
    # Crawl the ten pages (offsets 0, 10, ..., 90) in parallel.
    # Pool comes from multiprocessing; a sequential alternative would be:
    #   for i in range(10): main(i * 10)
    pool = Pool()
    pool.map(main, [i * 10 for i in range(10)])
    pool.close()
    pool.join()
原文地址:https://www.cnblogs.com/spencersun/p/9580654.html