1. Python crawler: fetching a page's source with request.urlopen

# Python 3: import the request module from the urllib package
from urllib import request
import sys
import io
# If print() raises encoding errors when printing the page source, reconfigure stdout to UTF-8 first (a decode-and-print sketch follows the snippet below)
sys.stdout = io.TextIOWrapper(sys.stdout.buffer, encoding='utf-8')
# URL to fetch
url = 'http://www.jinri.com/'
# Request headers
headers = {
    "User-Agent": "Mozilla/5.0 (Windows NT 10.0; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/64.0.3282.186 Safari/537.36"
}
# Build the Request object
req = request.Request(url, headers=headers)
# request.urlopen sends the request and returns a response object; with no data argument it is a GET request, otherwise a POST (a POST variation is sketched below)
response = request.urlopen(req)
# Write the response body to an HTML file
with open('a.html', 'wb') as f:
    f.write(response.read())
# Print the HTTP status code
print(response.getcode())
# Print the final URL, in case a redirect changed it
print(response.url)
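
The snippet above writes the raw bytes straight to a.html; note that response.read() can only be consumed once. To print or parse the HTML as text instead, decode the bytes first. The following is a minimal sketch, not part of the original post: it reuses the url and headers defined above and assumes UTF-8 when the server does not declare a charset.

req = request.Request(url, headers=headers)
with request.urlopen(req) as resp:
    body = resp.read()                                    # the body can only be read once
    # prefer the charset declared by the server; assume utf-8 when it is missing
    charset = resp.headers.get_content_charset() or 'utf-8'
html = body.decode(charset, errors='replace')
# with sys.stdout wrapped as UTF-8 above, printing the decoded text will not raise UnicodeEncodeError
print(html[:200])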
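
As the comment above notes, urlopen switches from GET to POST as soon as a data argument is supplied. The sketch below only illustrates that switch: the endpoint and form fields are made-up placeholders rather than part of the original example, and it reuses the headers dict defined above.

from urllib import request, parse

post_url = 'http://httpbin.org/post'          # placeholder endpoint, for illustration only
form = {'keyword': 'python'}                  # placeholder form fields
# urlencode builds the form body; urlopen expects it as bytes, hence the encode
data = parse.urlencode(form).encode('utf-8')
post_req = request.Request(post_url, data=data, headers=headers)
post_response = request.urlopen(post_req)
# because a data argument was passed, this request goes out as a POST
print(post_response.getcode())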
Original article: https://www.cnblogs.com/toloy/p/8611036.html