spider_Simulating login with cookies

"""
使用cook模拟登陆(反 登录)
"""
from urllib import request
import chardet

def baiDu():
    url = "https://www.baidu.com/"
    headers = {
        "User-Agent": "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/74.0.3729.169 Safari/537.36",
        "Cookie": "BAIDUID=954E1AC8A830CE77E8B1E19CD1659681:FG=1; BIDUPSID=954E1AC8A830CE77E8B1E19CD1659681; PSTM=1555899179; BD_UPN=12314753; BDUSS=0ZzMFpSbzVEeTZDSnRIamRMazM3UnM3QTVxWHNvS1RubllveGZaZkI2Y1Z2UmhkSVFBQUFBJCQAAAAAAAAAAAEAAAC~TsVzuf6wobCh0b25~tG9AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAABUw8VwVMPFcc; BDORZ=B490B5EBF6F3CD402E515D22BCDA1598; BD_HOME=1; H_PS_PSSID=26522_1431_21098_29073_20880_28518_29099_29134_28831_28584_22160; sugstore=1"
    }
    # 1. Use an IP proxy: build the proxy dict
    proxy = {
        "http": "182.34.32.54:39219"
    }
    # 2. Build the proxy handler
    proxyHandler = request.ProxyHandler(proxy)
    # 3. Build a URL opener that routes requests through the proxy
    opener = request.build_opener(proxyHandler)
    # 4. Install the opener globally so urlopen() uses it
    request.install_opener(opener)
    # Attach the User-Agent and Cookie headers to the request
    req = request.Request(url, headers=headers)
    # Fetch the raw page source as bytes
    bytesHtml = request.urlopen(req).read()
    # Detect the page's character encoding; returns a dict
    htmlEncode = chardet.detect(bytesHtml)
    # Decode the bytes using the detected encoding
    html = bytesHtml.decode(htmlEncode["encoding"])
    return html

bd = baiDu()
print(bd)
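For comparison, here is a minimal sketch of another way to handle cookies with urllib: instead of pasting a cookie string copied from the browser into the headers, an http.cookiejar.CookieJar can capture the cookies a server sets during login and resend them on later requests. The login URL and form field names below are hypothetical placeholders, not part of the original example.

from urllib import request, parse
from http import cookiejar

def loginWithCookieJar():
    # A CookieJar stores any Set-Cookie headers returned by the server
    cookieJar = cookiejar.CookieJar()
    cookieHandler = request.HTTPCookieProcessor(cookieJar)
    opener = request.build_opener(cookieHandler)

    # Hypothetical login form: URL and field names are placeholders
    loginUrl = "https://example.com/login"
    formData = parse.urlencode({"username": "user", "password": "pass"}).encode("utf-8")
    opener.open(loginUrl, data=formData)  # cookies from the response are now in cookieJar

    # Requests made through the same opener resend those cookies automatically
    html = opener.open("https://example.com/profile").read()
    return html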
Life is short, I use Python!
Original post: https://www.cnblogs.com/YangQingHong/p/10971574.html