[Python Crawler] Part 17: Scraping Meihua.info data with Selenium + PhantomJS and pyquery

  1. Introduction

    This example uses Selenium + PhantomJS to crawl the news feed of Meihua.info (http://www.meihua.info/a/list/today), scraping the items whose titles match a set of given keywords.

    Given keywords: 数字 (digital); 融合 (convergence); 电视 (TV)

    The following fields are scraped for each item:

      1. Title

      2. Link

      3. Date

      4. Source
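
    Each matched item is assembled into one record before being written to MongoDB. As a reference for the fields above, here is the record shape taken from the dictM literal in the complete code (section 4); the values shown are placeholders:

dictM = {'title': '...',                       # item title
         'date': '2017-06-21',                 # normalized to YYYY-MM-DD (placeholder value)
         'url': 'http://www.meihua.info/...',  # absolute item link (placeholder)
         'keyword': '电视',                     # the keyword that matched the title
         'introduction': '...',                # the title is reused as the introduction
         'source': ''}                         # filled in later from the article page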

 

  2. Website Information

    (Screenshots of the list page and of an article page, showing the HTML structure targeted by the selectors below, are omitted here.)

  3. Data Scraping

    Based on the page structure shown above, the data is scraped as follows (a standalone sketch of these selectors follows the list):

    1. Scrape the item list

      Code: Elements = doc('li[class="item"]')

    2. Scrape the title

      Code: title = element('a[class="list-title-color no-link"]').text().encode('utf8').strip()

    3. Scrape the link

      Code: url = 'http://www.meihua.info' + element('a[class="list-title-color no-link"]').attr('href')

    4. Scrape the date

      Code: date = element('span[class="desc-font date"]').text().encode('utf8').strip()

    5. Scrape the source

      Code: strSource = dochtml('div[class="art-content"]').eq(0).find('font').eq(0).text().encode('utf8').strip()
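
    To try these selectors outside the browser, here is a minimal standalone sketch that runs the same pyquery queries against a hand-written HTML fragment (the fragment and the sample href are invented for illustration; the selectors and URL prefix are the ones used above):

# coding=utf-8
from pyquery import PyQuery as pq

# Hand-written fragment mimicking the list markup (invented for illustration)
html = '''
<ul>
  <li class="item">
    <a class="list-title-color no-link" href="/a/201706/12345.html">数字电视资讯示例</a>
    <span class="desc-font date">今天</span>
  </li>
</ul>
'''

doc = pq(html)
for element in doc('li[class="item"]').items():  # step 1: the item list
    title = element('a[class="list-title-color no-link"]').text().encode('utf8').strip()  # step 2
    url = 'http://www.meihua.info' + element('a[class="list-title-color no-link"]').attr('href')  # step 3
    date = element('span[class="desc-font date"]').text().encode('utf8').strip()  # step 4
    print 'title:%s url:%s date:%s' % (title, url, date)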

   

  4. Complete Code

# coding=utf-8
import os
import re
from selenium import webdriver
import selenium.webdriver.support.ui as ui
import time
from datetime import datetime
from selenium.webdriver.common.action_chains import ActionChains
import IniFile
from threading import Thread
from pyquery import PyQuery as pq
import LogFile
import mongoDB

class meihuaSpider(object):
    def __init__(self):

        logfile = os.path.join(os.path.dirname(os.getcwd()), time.strftime('%Y-%m-%d') + '.txt')
        self.log = LogFile.LogFile(logfile)
        configfile = os.path.join(os.path.dirname(os.getcwd()), 'setting.conf')
        cf = IniFile.ConfigFile(configfile)
        self.webSearchUrl_list = cf.GetValue("meihua", "webSearchUrl").split(';')
        self.keyword_list = cf.GetValue("section", "information_keywords").split(';')
        self.db = mongoDB.mongoDbBase()
        self.start_urls = []
        for url in self.webSearchUrl_list:
            self.start_urls.append(url)

        self.driver = webdriver.PhantomJS()
        self.wait = ui.WebDriverWait(self.driver, 2)
        self.driver.maximize_window()

    def Compare_to_days(self, leftdate, rightdate):
        '''
        Compare two date strings: how many days leftdate is ahead of rightdate
        :param leftdate: format: 2017-04-15
        :param rightdate: format: 2017-04-15
        :return: number of days
        '''
        l_time = time.mktime(time.strptime(leftdate, '%Y-%m-%d'))
        r_time = time.mktime(time.strptime(rightdate, '%Y-%m-%d'))
        result = int(l_time - r_time) / 86400
        return result
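
    # Example: Compare_to_days('2017-04-16', '2017-04-15') returns 1 (whole
    # days; integer division by 86400 truncates partial days). The helper is
    # kept for date-window filtering but is not called in scrapy_date below.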

    def date_isValid(self, strDateText):
        '''
        Check whether a date string is acceptable: only items dated '今天'
        (today) are kept, normalized to the current date
        :param strDateText: one of four formats: '今天'; '前天'; '昨天'; '06月12日'
        :return: (True, date string) if acceptable; (False, '') otherwise
        '''
        currentDate = time.strftime('%Y-%m-%d')
        if strDateText.find('今天') > -1 :
            return True, currentDate
        return False, ''
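
    # Example: date_isValid('今天') returns (True, <current date>), while
    # '昨天', '前天', or '06月12日' all return (False, ''), so only items
    # posted today are kept.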


    def log_print(self, msg):
        '''
        Print a timestamped log message to stdout
        :param msg: message text
        :return:
        '''
        print '%s: %s' % (time.strftime('%Y-%m-%d %H-%M-%S'), msg)

    def scrapy_date(self):
        strsplit = '------------------------------------------------------------------------------------'
        for link in self.start_urls:
            self.driver.get(link)
            selenium_html = self.driver.execute_script("return document.documentElement.outerHTML")
            doc = pq(selenium_html)
            infoList = []

            self.log.WriteLog(strsplit)
            self.log_print(strsplit)
            Elements = doc('li[class="item"]')  # <li> elements whose class attribute is exactly "item"
            for element in Elements.items():
                date = element('span[class="desc-font date"]').text().encode('utf8').strip()
                flag, strDate = self.date_isValid(date)
                if flag:
                    title = element('a[class="list-title-color no-link"]').text().encode('utf8').strip()

                    for keyword in self.keyword_list:
                        if title.find(keyword) > -1:
                            url = 'http://www.meihua.info' + element('a[class="list-title-color no-link"]').attr('href')

                            dictM = {'title': title, 'date': strDate,
                                     'url': url, 'keyword': keyword,
                                     'introduction': title, 'source': ''}
                            infoList.append(dictM)
                            break
            if len(infoList) > 0:
                for item in infoList:
                    url = item['url']
                    self.driver.get(url)
                    htext = self.driver.execute_script("return document.documentElement.outerHTML")
                    dochtml = pq(htext)

                    slen = len('来源:')  # byte length of the '来源:' prefix
                    strSource = dochtml('div[class="art-content"]').eq(0).find('font').eq(0).text().encode('utf8').strip()
                    if strSource.find('来源') > -1:
                        strSource = strSource[strSource.find('来源') + slen:]
                        # trim trailing text after the source name; the delimiter
                        # searched for here was garbled in the original post, so
                        # the fullwidth '(' is an assumption
                        if strSource.find('(') > -1:
                            strSource = strSource[0:strSource.find('(')]
                        item['source'] = strSource
                    elif strSource.find('梅花网原创') > -1:
                        item['source'] = '梅花网'
                    else:
                        strSource = dochtml('div[class="art-content"]').text().encode('utf8').strip()
                        if strSource.find('梅花网原创') > -1 or strSource.find('消息源:梅花网') > -1:
                            item['source'] = '梅花网'
                        else:
                            strSource = dochtml('a[rel="nofollow"]').text().encode('utf8').strip()
                            item['source'] = strSource
                    self.log_print('title:%s' % item['title'])
                    self.log_print('url:%s' % item['url'])
                    self.log_print('date:%s' % item['date'])
                    self.log_print('source:%s' % item['source'])
                    self.log_print('keyword:%s' % item['keyword'])
                    self.log_print(strsplit)
                self.db.SaveInformations(infoList)

        self.driver.close()
        self.driver.quit()

if __name__ == '__main__':
    obj = meihuaSpider()
    obj.scrapy_date()
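
The script reads its start URLs and keywords from a setting.conf file one directory above the working directory. Assuming the author's IniFile.ConfigFile wrapper uses standard INI layout (the section and key names are taken from the GetValue calls in __init__; the values echo the URL and keywords given in the introduction), a minimal sketch would be:

[meihua]
webSearchUrl = http://www.meihua.info/a/list/today

[section]
information_keywords = 数字;融合;电视

Multiple URLs or keywords are separated with semicolons, matching the split(';') calls in __init__.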
Original post: https://www.cnblogs.com/shaosks/p/7053374.html