Web Scraping in Practice 1: Scraping Movie Information with requests and Regular Expressions

The code is as follows:

# coding=utf-8
import requests
from requests.exceptions import RequestException
import re
import json
from multiprocessing import Pool  # process pool for fetching pages in parallel


def get_one_page(url):
    headers = {
        'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) '
                      'Chrome/65.0.3325.181 Safari/537.36'
    }
    try:
        response = requests.get(url, headers=headers)
        if response.status_code == 200:
            return response.text
        return None
    except RequestException:
        return None


def parse_one_page(html):
    # Extract rank, poster image, title, starring actors, release time, and score
    pattern = re.compile('<dd>.*?board-index.*?>(\d+)</i>.*?data-src="(.*?)".*?name"><a.*?>(.*?)</a>'
                         '.*?star">(.*?)</p>.*?releasetime">(.*?)</p>'
                         '.*?integer">(.*?)</i>.*?fraction">(.*?)</i>.*?</dd>', re.S)

    items = re.findall(pattern, html)

    # Format the output: turn each matched tuple into a dictionary
    for item in items:
        yield {
            'index': item[0],
            'image': item[1],
            'title': item[2],
            'actor': item[3].strip()[3:],  # drop the leading "主演:" label
            'time': item[4].strip()[5:],   # drop the leading "上映时间:" label
            'score': item[5] + item[6]     # integer part + fractional part
        }


# json.dumps converts the dictionary into a string.
# Without encoding='utf-8' here and ensure_ascii=False below,
# the contents of result.txt would be garbled.
def write_to_file(content):
    with open('result.txt', 'a', encoding='utf-8') as f:
        f.write(json.dumps(content, ensure_ascii=False) + '\n')


# offset selects which page to scrape
def main(offset):
    url = 'http://maoyan.com/board/4?offset=' + str(offset)
    html = get_one_page(url)
    for item in parse_one_page(html):
        print(item)
        write_to_file(item)


if __name__ == '__main__':
    '''
    The straightforward way to grab the top-100 film info; i*10 because
    the offset in the URL grows in steps of 10 from page to page:
    for i in range(10):
        main(i*10)
    '''
    # Use a process pool for better throughput
    pool = Pool()
    pool.map(main, [i*10 for i in range(10)])
    pool.close()
    pool.join()
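A quick way to check the regular expression is to feed parse_one_page a hand-written snippet that mimics the board page's markup. The snippet and all field values below are made up for illustration only; note that the [3:] and [5:] slices assume the fields carry the Chinese labels 主演: and 上映时间: exactly as on the real page.

sample_html = '''
<dd>
    <i class="board-index board-index-1">1</i>
    <img data-src="http://example.com/poster.jpg">
    <p class="name"><a href="/films/1">某电影</a></p>
    <p class="star">主演:演员甲,演员乙</p>
    <p class="releasetime">上映时间:1993-01-01</p>
    <i class="integer">9.</i><i class="fraction">5</i>
</dd>
'''
for item in parse_one_page(sample_html):
    print(item)
# {'index': '1', 'image': 'http://example.com/poster.jpg', 'title': '某电影',
#  'actor': '演员甲,演员乙', 'time': '1993-01-01', 'score': '9.5'}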

A simple implementation using pyquery

from pyquery import PyQuery as pq

headers = {
    'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) '
                  'Chrome/65.0.3325.181 Safari/537.36'
}


def write_to_file(content):
    with open('result.txt', 'a') as f:
        f.write(content)


def main(offset):
    url = 'http://maoyan.com/board/4?offset=' + str(offset)
    doc = pq(url, headers=headers)
    dd = doc('dd').text()
    for x in dd.split(' '):
        print(x)
        print('\n')
        write_to_file(x + '\n')
        """
        To combine the two prints into one, you could do:
        c = ''.join([x, '\n'])
        print(c)
        """


if __name__ == '__main__':
    for i in range(10):
        main(i*10)
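Splitting the flattened text of all the dd nodes is fragile: any field that itself contains a space gets broken apart. A more structured sketch is to walk each dd and pull the fields out with CSS selectors. The class names below (board-index, name, star, releasetime, integer, fraction) are the same ones the regex version matches on; parse_with_selectors is a hypothetical helper that reuses the headers dict defined above.

from pyquery import PyQuery as pq

def parse_with_selectors(offset):
    url = 'http://maoyan.com/board/4?offset=' + str(offset)
    doc = pq(url, headers=headers)
    # .items() yields each <dd> as its own PyQuery object
    for dd in doc('dd').items():
        yield {
            'index': dd('i.board-index').text(),
            'title': dd('p.name a').text(),
            'actor': dd('p.star').text(),
            'time': dd('p.releasetime').text(),
            'score': dd('i.integer').text() + dd('i.fraction').text()
        }

This keeps each movie's fields together in one dictionary, mirroring the output of the regex version, so the JSON-based write_to_file from the first script could be reused unchanged.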
Original article: https://www.cnblogs.com/regit/p/9261633.html