JS + Selenium + Excel append writes: scraping any JD.com product with Python

I had always written crawlers with the requests library; this time I tried Selenium instead. It is not very fast, but it runs into far fewer anti-scraping restrictions. The script below combines an expert's Selenium crawler with Excel append-write code, and there is still room for optimization; the plan is to scrape more fields and then run a word-cloud analysis on them.
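
Environment note (my setup assumptions, not from the original post): the script needs Selenium 3.x, the legacy xls stack (xlrd / xlwt / xlutils), and a ChromeDriver binary on PATH that matches the local Chrome. A typical install would be:

pip install "selenium<4" xlrd xlwt xlutils

The find_element_by_* calls below were removed in Selenium 4 (4.3+), so either pin Selenium 3 as above or adapt them as in the By-based sketch at the end of the post.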

'''
Scrape JD.com product information:
    Request URL:
        https://www.jd.com/
    Fields to extract:
        1. Product detail page link
        2. Product name
        3. Product price
        4. Number of reviews
        5. Product seller
'''
# coding=UTF-8
from selenium import webdriver
from selenium.webdriver.common.keys import Keys
import time
import xlrd
import xlwt
from xlutils.copy import copy


def write_excel_xls(path, sheet_name, value):
    index = len(value)  # number of rows to write
    workbook = xlwt.Workbook()  # create a new workbook
    sheet = workbook.add_sheet(sheet_name)  # add a new sheet to the workbook
    for i in range(0, index):
        for j in range(0, len(value[i])):
            sheet.write(i, j, value[i][j])  # write the value into the sheet at row i, column j
    workbook.save(path)  # save the workbook
    print("Data written to the .xls workbook successfully!")


def write_excel_xls_append(path, value):
    index = len(value)  # number of rows to write
    workbook = xlrd.open_workbook(path)  # open the existing workbook
    sheets = workbook.sheet_names()  # get the names of all sheets in the workbook
    worksheet = workbook.sheet_by_name(sheets[0])  # get the first sheet
    rows_old = worksheet.nrows  # number of rows already present in the sheet
    new_workbook = copy(workbook)  # convert the xlrd workbook into an xlwt workbook
    new_worksheet = new_workbook.get_sheet(0)  # get the first sheet of the converted workbook
    for i in range(0, index):
        for j in range(0, len(value[i])):
            new_worksheet.write(i + rows_old, j, value[i][j])  # append the data, starting at row i + rows_old
    new_workbook.save(path)  # save the workbook
    print("Data appended to the .xls workbook successfully!")


def read_excel_xls(path):
    workbook = xlrd.open_workbook(path)  # open the workbook
    sheets = workbook.sheet_names()  # get the names of all sheets in the workbook
    worksheet = workbook.sheet_by_name(sheets[0])  # get the first sheet
    for i in range(0, worksheet.nrows):
        for j in range(0, worksheet.ncols):
            print(worksheet.cell_value(i, j), "\t", end="")  # read the data cell by cell, row by row
        print()
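

# Usage sketch for the three helpers above (illustrative only, not part of the crawler
# flow; 'demo.xls' and the sample rows are made up here):
#   write_excel_xls('demo.xls', 'sheet1', [["col A", "col B"]])  # create the workbook with a header row
#   write_excel_xls_append('demo.xls', [[1, 2], [3, 4]])         # append two data rows below the header
#   read_excel_xls('demo.xls')                                   # print the whole sheet, tab-separated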


def get_good(driver):
    value = []
    # Scroll the page via JS so that the lazy-loaded products are rendered
    js_code = '''
            window.scrollTo(0,5000);
        '''
    driver.execute_script(js_code)  # execute the JS code

    # Wait for the data to load
    time.sleep(2)

    # 3. Find all product divs
    # good_div = driver.find_element_by_id('J_goodsList')
    good_list = driver.find_elements_by_class_name('gl-item')
    n = 1
    for good in good_list:
        # Locate the fields with CSS / class selectors
        # Product link
        good_url = good.find_element_by_css_selector(
            '.p-img a').get_attribute('href')

        # Product name
        good_name = good.find_element_by_css_selector(
            '.p-name em').text.replace("\n", "--")

        # Product price
        good_price = good.find_element_by_class_name(
            'p-price').text.replace("\n", ":")

        # Number of reviews
        good_commit = good.find_element_by_class_name(
            'p-commit').text.replace("\n", " ")

        # good_content = f'''
        #                 Product link: {good_url}
        #                 Product name: {good_name}
        #                 Product price: {good_price}
        #                 Review count: {good_commit}
        #                 '''
        # print(good_content)
        # with open('jd.txt', 'a', encoding='utf-8') as f:
        #     f.write(good_content)
        value1 = [good_url, good_name, good_price, good_commit]
        value.append(value1)

    return value
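

# Optional refinement (a sketch, not used by the code above): a single window.scrollTo(0, 5000)
# may leave some lazy-loaded items unrendered, so a common pattern is to keep scrolling until
# document.body.scrollHeight stops growing. The helper name and parameters are my own
# illustration, not from the original post.
def scroll_until_loaded(driver, pause=1.0, max_rounds=10):
    last_height = driver.execute_script('return document.body.scrollHeight')
    for _ in range(max_rounds):
        driver.execute_script('window.scrollTo(0, document.body.scrollHeight)')
        time.sleep(pause)  # give the lazy-loaded goods time to render
        new_height = driver.execute_script('return document.body.scrollHeight')
        if new_height == last_height:  # page height is stable: assume everything is loaded
            break
        last_height = new_height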


if __name__ == '__main__':
    good_name = input('Enter the product to scrape: ').strip()
    num = int(input('Enter the number of pages to scrape: '))
    driver = webdriver.Chrome()
    driver.implicitly_wait(10)
    # 1. Request the JD.com home page
    driver.get('https://www.jd.com/')

    # 2. Type the product name into the search box and press Enter
    input_tag = driver.find_element_by_id('key')
    input_tag.send_keys(good_name)
    input_tag.send_keys(Keys.ENTER)
    time.sleep(2)
    # Sort the results by review count ('评论数' is the link text on the JD results page)
    driver.find_element_by_link_text('评论数').click()
    time.sleep(2)
    book_name_xls = good_name + '.xls'
    sheet_name_xls = good_name
    value_title = [["Product link", "Product name", "Product price", "Review count"], ]
    write_excel_xls(book_name_xls, sheet_name_xls, value_title)
    for i in range(0, num):
        value = get_good(driver)
        write_excel_xls_append(book_name_xls, value)
        next_tag = driver.find_element_by_class_name('pn-next')
        next_tag.click()
        time.sleep(2)
        read_excel_xls(book_name_xls)
    driver.close()
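
A note for Selenium 4 users (my adaptation, not from the original post): the find_element_by_* methods used above were removed in Selenium 4.3+, and quit() shuts down the whole chromedriver session rather than just the current window. A minimal sketch of the lines that would change, with the same IDs, classes and link text as above:

from selenium.webdriver.common.by import By

input_tag = driver.find_element(By.ID, 'key')
good_list = driver.find_elements(By.CLASS_NAME, 'gl-item')
good_url = good.find_element(By.CSS_SELECTOR, '.p-img a').get_attribute('href')
driver.find_element(By.LINK_TEXT, '评论数').click()
next_tag = driver.find_element(By.CLASS_NAME, 'pn-next')
driver.quit()  # instead of driver.close(): ends the whole browser session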
Original article: https://www.cnblogs.com/ClarenceSun/p/12782756.html