Simulating a Taobao login in Python with requests sessions

A while back I wanted to scrape some Taobao data, but it turned out to require a login. After digging through a lot of material, I found an approach that uses a requests session plus a cookie: log in through the browser first, then copy the cookie out of the developer tools. The page you get back this way is static HTML, though, so dynamically loaded content needs a different approach. Most of the data I wanted could be found in the page source, but as you know, some dynamically loaded pieces are not that simple, and I also noticed that the format of some fields in the source had been altered; for example, the detail link of an item could not be used directly as returned. Overall this was a failed attempt, but it did teach me that sessions and cookies can get you into pages that require a login, which is a technique worth keeping.
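Before the full script, here is a minimal sketch of the session-plus-cookie idea on its own. The cookie string below is a placeholder for whatever you copy out of the browser's developer tools after logging in; feeding it into the session's cookie jar (rather than stuffing it into a raw header, as the script below does) lets requests manage it for every request through that session:

import requests

session = requests.Session()

# placeholder: paste the cookie string copied from the browser's developer tools
raw_cookie = 'miid=...; tracknick=...; cookie2=...'

# split the header string into individual cookies and store them in the
# session's cookie jar, so every request through this session sends them
for pair in raw_cookie.split('; '):
    name, _, value = pair.partition('=')
    session.cookies.set(name, value)

# any page fetched through this session is now fetched "logged in"
resp = session.get('https://www.taobao.com/')
print(resp.status_code)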

A record of the failed attempt:

import requests
import os
import json
from pyquery import PyQuery as pq
import re
import time

sessions = requests.Session()   # one Session object keeps headers and cookies across requests
url = 'https://s.taobao.com/search?q=ipad'

# headers copied from a logged-in browser session; the cookie (masked here)
# is what actually carries the login state
sessions.headers['User-Agent'] = 'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_9_2) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/34.0.1847.131 Safari/537.36'
sessions.headers['cookie']='miid=8428352431475518963; hng=CN***********; cna=IzulExo***************; thw=cn; __guid=15467**********; enc=Ubrfp%2******************************************************; t=5********************1e0; tracknick=%5*********3; lgc=40***3; tg=0; x=e%3D1%26p%3D*%26s%3D0%26c%3D0%26f%3D0%26g%3D0%26t%3D0%26__ll%3D-1%26_ato%3D0; cookie2=393e1f359e39e184059e8c87422bb5ce; v=0; _tb_token_=e7e035bee1ae8; _m_h5_tk=ebb49583b4434c3ff9f4bb277236a5d2_1541089384718; _m_h5_tk_enc=b0dd87431f8ade45b56bccb4982c0bf4; alitrackid=world.taobao.com; swfstore=29789; unb=3159140427; sg=374; _l_g_=Ug%3D%3D; skt=c9446f78d9091af3; cookie1=AHt5ehB%2FBw25k99NwMwTM4z3CWVA2J%2FVUVn4V3D2TMk%3D; csg=7b6476e0; uc3=vt3=F8dByRjNVxN9vRJQjTQ%3D&id2=UNGToApZ%2B2dYHA%3D%3D&nk2=sECE1uX4Wg%3D%3D&lg2=VFC%2FuZ9ayeYq2g%3D%3D; existShop=MTU0MTA4NzI2Ng%3D%3D; _cc_=Vq8l%2BKCLiw%3D%3D; dnk=%5Cu6C38%5Cu65E0%5Cu540D3; _nk_=%5Cu6C38%5Cu65E0%5Cu540D3; cookie17=UNGToApZ%2B2dYHA%'

for i in range(1):                 # only the first results page; range(n) would walk n pages
    strs = str(i * 44)             # Taobao paginates 44 items per page via the s parameter
    urls = url + '&s=' + strs

    html = sessions.get(urls).text
    doc = str(pq(html))            # pyquery is not strictly needed here; str() just returns the HTML

    os.chdir(r'G:\PSPY')           # output directory; adjust to your own path
    contentss = []

    # the item data is embedded in the page's JavaScript; grab the ad block
    # ("p4pTags") and the organic listing ("itemlist") with non-greedy regexes.
    # note: flags must go into re.compile -- a compiled pattern's findall()
    # treats its second argument as a start position, not as flags
    htmls = re.compile(r'p4pTags(.*?)"risk"', re.S | re.M)
    garbage = re.compile(r'itemlist(.*?)"risk"', re.S | re.M)
    gb = garbage.findall(doc)
    finhtml = htmls.findall(doc)
    finhtml = finhtml + gb

    print(len(finhtml))            # how many blocks were matched
    # regexes for pulling individual fields out of the embedded JSON
    raw_title = r'"raw_title":"(.*?)"'          # title
    view_price = r'"view_price":"(.*?)"'        # price
    view_fee = r'"view_fee":"(.*?)"'            # discount
    item_loc = r'"item_loc":"(.*?)"'            # location
    view_sales = r'"view_sales":"(.*?)"'        # number of buyers
    comment_count = r'"comment_count":"(.*?)"'  # number of comments
    detail_url = r'"detail_url":"(.*?)"'        # item URL

    for html in finhtml:
        rtitle = re.findall(raw_title, html)
        price = re.findall(view_price, html)
        fee = re.findall(view_fee, html)
        loc = re.findall(item_loc, html)
        sales = re.findall(view_sales, html)
        comment = re.findall(comment_count, html)
        deurl = re.findall(detail_url, html)
        # zip the parallel field lists back into one record per item
        for rt, p, f, l, s, c, u in zip(rtitle, price, fee, loc, sales, comment, deurl):
            contentss.append({"raw_title": rt, "view_price": p, "view_fee": f,
                              "item_loc": l, "view_sales": s, "comment_count": c,
                              "detail_url": u})
    # note: appending a whole JSON array per page means the file is only
    # valid JSON when a single page is written
    with open('ipad.json', 'a', encoding='utf-8') as file:
        file.write(json.dumps(contentss, indent=2, ensure_ascii=False))
    time.sleep(2)  # pause between requests
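As an aside on the detail_url problem mentioned above: at the time, the Taobao search page embedded its results as a g_page_config JavaScript object, so a more robust alternative to field-by-field regexes is to cut that object out once and parse it with json.loads. This is only a sketch under that assumption; the object name and the mods/itemlist/data/auctions structure are what the page exposed back then and may have changed since. The detail_url values are typically protocol-relative (//item.taobao.com/...), so prepending https: makes them usable:

import json
import re

def parse_page_config(html):
    # the search results are embedded as: g_page_config = {...};
    m = re.search(r'g_page_config = (\{.*?\});', html, re.S)
    if not m:
        return []
    config = json.loads(m.group(1))
    # field names as observed on the page at the time; the structure may change
    auctions = config.get('mods', {}).get('itemlist', {}).get('data', {}).get('auctions', [])
    items = []
    for a in auctions:
        url = a.get('detail_url', '')
        if url.startswith('//'):
            url = 'https:' + url   # detail_url is usually protocol-relative
        items.append({'raw_title': a.get('raw_title'),
                      'view_price': a.get('view_price'),
                      'detail_url': url})
    return items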
Original post (in Chinese): https://www.cnblogs.com/Guhongying/p/9937085.html