15 - Scrapy crawler framework: database backup via item pipelines 02

Data backup based on item pipelines

  • Store the scraped data in several different targets
  • Keep one copy locally and one copy in MySQL and Redis
  • One pipeline class corresponds to one form of persistent storage; to store the data in different targets you need multiple pipeline classes (a minimal sketch follows this list)
    Create a Scrapy project: scrapy startproject proName
    Enter the project directory and create a spider source file: scrapy genspider spiderName www.xxx.com
    Run the project: scrapy crawl spiderName
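
As noted in the last bullet, each storage target gets its own pipeline class, and the classes are chained together: each process_item must return item so the next class receives it. A minimal sketch of that structure (FilePipeline, DatabasePipeline and the proName module path are placeholder names, not the project built below):

# pipelines.py -- one pipeline class per storage target
class FilePipeline:
    def process_item(self, item, spider):
        # ... write the item to a local file ...
        return item # hand the item to the next pipeline class

class DatabasePipeline:
    def process_item(self, item, spider):
        # ... write the item to a database ...
        return item

# settings.py -- register both classes; the smaller the number, the higher the priority
ITEM_PIPELINES = {
    'proName.pipelines.FilePipeline': 300,     # runs first
    'proName.pipelines.DatabasePipeline': 301, # runs second
}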

Create the database in MySQL
[screenshot: creating the gemoumou database]
Create the database table
[screenshot: creating the duanziwang table]
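
For reference, the same setup can be done from Python with pymysql (the library the pipeline below already uses). The column types here are assumptions; the pipeline below only needs a duanziwang table with two string columns for title and content:

import pymysql

conn = pymysql.Connect(host="127.0.0.1", port=3306, user="root", password="root", charset="utf8")
cursor = conn.cursor()
cursor.execute("CREATE DATABASE IF NOT EXISTS gemoumou CHARACTER SET utf8")
cursor.execute("USE gemoumou")
# title/content column types are assumed, not taken from the original screenshots
cursor.execute("CREATE TABLE IF NOT EXISTS duanziwang (title VARCHAR(255), content TEXT)")
conn.commit()
cursor.close()
conn.close()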

Modify the pipelines.py file

# Define your item pipelines here
#
# Don't forget to add your pipeline to the ITEM_PIPELINES setting
# See: https://docs.scrapy.org/en/latest/topics/item-pipeline.html


# useful for handling different item types with a single interface
from itemadapter import ItemAdapter
import pymysql # MySQL client library
from redis import Redis

class GemoumouPipeline:
    fp = None
    # Override two methods of the parent class
    def open_spider(self,spider):
        print("This is open_spider(); it runs only once, when the spider starts")
        self.fp = open("基于管道存储.txt","w",encoding="utf-8")
    def close_spider(self,spider):
        print("This is close_spider(); it runs only once, when the spider closes")
        self.fp.close() # close the file

    # This method receives item objects, one item per call, so it is called once per item
    # Parameter item: the item that was received
    def process_item(self, item, spider):
        #print(item) # item can be used like a dict
        # Store the item in the text file
        self.fp.write(item["title"] + ":" + item["content"] + '\n')
        return item # pass the item on to the next pipeline class to be executed

# Store the data in MySQL
class MysqlPipeline(object):
    conn = None # connection object
    cursor = None # cursor object
    def open_spider(self,spider): # create the connection object
        self.conn = pymysql.Connect(host="127.0.0.1",port=3306,user="root",password="root",db="gemoumou",charset="utf8")
        print(self.conn) # print the connection object
    def process_item(self,item,spider):
        self.cursor = self.conn.cursor() # create a cursor
        sql = 'insert into duanziwang VALUES ("%s","%s")'%(item['title'],item['content']) # insert the data into the table

        # Transaction handling
        try:
            self.cursor.execute(sql) # execute the SQL statement
            self.conn.commit() # commit if everything went well
        except Exception as e:
            print(e) # print the exception if something went wrong
            self.conn.rollback()
        return item
    def close_spider(self,spider): # close the connection
        self.cursor.close()
        self.conn.close()

# # Write the data to Redis
# class RedisPipeline(object):
#     conn = None
#     def open_spider(self,spider):
#         self.conn = Redis(host="127.0.0.1",port=6379)
#         print(self.conn)
#     def process_item(self,item,spider):
#         self.conn.lpush("duanziwang",item)
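
Two side notes on the code above, offered as sketches rather than as part of the original post. First, building the INSERT statement with string formatting breaks as soon as a title or content contains a double quote; pymysql's cursor.execute can also take the values as parameters:

self.cursor.execute('insert into duanziwang VALUES (%s, %s)', (item['title'], item['content']))

Second, recent versions of redis-py refuse to lpush a dict (or a scrapy Item) and raise a DataError, so if you enable the commented-out RedisPipeline you will likely need to serialize the item first, for example:

# Sketch: serialize the item to JSON before pushing it into Redis
import json
from redis import Redis

class RedisPipeline(object):
    conn = None
    def open_spider(self, spider):
        self.conn = Redis(host="127.0.0.1", port=6379)
    def process_item(self, item, spider):
        self.conn.lpush("duanziwang", json.dumps(dict(item), ensure_ascii=False))
        return item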

Modify the settings.py configuration file

  • Define pipeline priorities
# Scrapy settings for gemoumou project
#
# For simplicity, this file contains only settings considered important or
# commonly used. You can find more settings consulting the documentation:
#
#     https://docs.scrapy.org/en/latest/topics/settings.html
#     https://docs.scrapy.org/en/latest/topics/downloader-middleware.html
#     https://docs.scrapy.org/en/latest/topics/spider-middleware.html

BOT_NAME = 'gemoumou'

SPIDER_MODULES = ['gemoumou.spiders']
NEWSPIDER_MODULE = 'gemoumou.spiders'
LOG_LEVEL = 'ERROR' # restrict logging output (only error messages are printed)

# Crawl responsibly by identifying yourself (and your website) on the user-agent
# Set a fake user-agent (UA spoofing)
USER_AGENT = 'Mozilla/5.0 (Windows NT 10.0; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/83.0.4103.97 Safari/537.36'

# Obey robots.txt rules
# Set to False so the crawler does not obey the robots.txt protocol
ROBOTSTXT_OBEY = False

# Configure maximum concurrent requests performed by Scrapy (default: 16)
#CONCURRENT_REQUESTS = 32

# Configure a delay for requests for the same website (default: 0)
# See https://docs.scrapy.org/en/latest/topics/settings.html#download-delay
# See also autothrottle settings and docs
#DOWNLOAD_DELAY = 3
# The download delay setting will honor only one of:
#CONCURRENT_REQUESTS_PER_DOMAIN = 16
#CONCURRENT_REQUESTS_PER_IP = 16

# Disable cookies (enabled by default)
#COOKIES_ENABLED = False

# Disable Telnet Console (enabled by default)
#TELNETCONSOLE_ENABLED = False

# Override the default request headers:
#DEFAULT_REQUEST_HEADERS = {
#   'Accept': 'text/html,application/xhtml+xml,application/xml;q=0.9,*/*;q=0.8',
#   'Accept-Language': 'en',
#}

# Enable or disable spider middlewares
# See https://docs.scrapy.org/en/latest/topics/spider-middleware.html
#SPIDER_MIDDLEWARES = {
#    'gemoumou.middlewares.GemoumouSpiderMiddleware': 543,
#}

# Enable or disable downloader middlewares
# See https://docs.scrapy.org/en/latest/topics/downloader-middleware.html
#DOWNLOADER_MIDDLEWARES = {
#    'gemoumou.middlewares.GemoumouDownloaderMiddleware': 543,
#}

# Enable or disable extensions
# See https://docs.scrapy.org/en/latest/topics/extensions.html
#EXTENSIONS = {
#    'scrapy.extensions.telnet.TelnetConsole': None,
#}

# Configure item pipelines
# See https://docs.scrapy.org/en/latest/topics/item-pipeline.html

ITEM_PIPELINES = {
    # 300 is the pipeline class's priority; the smaller the number, the higher the priority
    # Higher-priority pipelines are executed first
    'gemoumou.pipelines.GemoumouPipeline': 300,
    'gemoumou.pipelines.MysqlPipeline': 301,
    # 'gemoumou.pipelines.RedisPipeline': 302,
} # enable the pipelines

# Enable and configure the AutoThrottle extension (disabled by default)
# See https://docs.scrapy.org/en/latest/topics/autothrottle.html
#AUTOTHROTTLE_ENABLED = True
# The initial download delay
#AUTOTHROTTLE_START_DELAY = 5
# The maximum download delay to be set in case of high latencies
#AUTOTHROTTLE_MAX_DELAY = 60
# The average number of requests Scrapy should be sending in parallel to
# each remote server
#AUTOTHROTTLE_TARGET_CONCURRENCY = 1.0
# Enable showing throttling stats for every response received:
#AUTOTHROTTLE_DEBUG = False

# Enable and configure HTTP caching (disabled by default)
# See https://docs.scrapy.org/en/latest/topics/downloader-middleware.html#httpcache-middleware-settings
#HTTPCACHE_ENABLED = True
#HTTPCACHE_EXPIRATION_SECS = 0
#HTTPCACHE_DIR = 'httpcache'
#HTTPCACHE_IGNORE_HTTP_CODES = []
#HTTPCACHE_STORAGE = 'scrapy.extensions.httpcache.FilesystemCacheStorage'


Spider source file

import scrapy
from gemoumou.items import GemoumouItem


class GpcSpider(scrapy.Spider):
    # Name of the spider; the unique identifier of this source file
    name = 'gpc'
    # allowed_domains limits which of the start_urls may actually be requested
#    allowed_domains = ['www.xxx.com'] # usually left commented out

    # start_urls: the list of start URLs; it may only hold URLs
    # Every URL stored in this list is requested with a GET request
    start_urls = ['https://duanziwang.com/category/经典段子/']

    # Data parsing
    # parse() is called once for every request made from start_urls
    # Parameter response: the response object returned by the server

    # Pipeline-based persistent storage
    def parse(self, response):
        # Parse out the title and the content
        article_list = response.xpath('//*[@id="35087"]')
        for article in article_list:
            # The parsed values are not plain strings, so the usage differs from lxml's etree xpath
            # The list returned by xpath holds Selector objects; the string data we want lives in each object's data attribute
            # extract() pulls that data attribute out
            # extract_first() pulls the data value out of the first Selector object in the list
            title = article.xpath("//div[@class='post-head']/h1[@class='post-title']/a/text()").extract_first()
            content = article.xpath("//div[@class='post-content']/p/text()").extract_first()
            # Instantiate an item object and store the parsed data in it
            item = GemoumouItem()
            # The fields cannot be read or set with item.<attribute>; use dict-style access
            item['title'] = title
            item['content'] = content

            # Hand the item object over to the pipelines
            yield item
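
As the comments in parse() point out, xpath() returns a SelectorList of Selector objects rather than plain strings. A quick illustration of extract() versus extract_first() (the HTML here is made up for the example):

# Illustration only: how extract() and extract_first() behave
from scrapy.selector import Selector

sel = Selector(text="<div><p>first</p><p>second</p></div>")
paragraphs = sel.xpath("//p/text()") # SelectorList of Selector objects, not strings
print(paragraphs.extract())          # ['first', 'second'] -- every data value, as strings
print(paragraphs.extract_first())    # 'first' -- the first data value, or None if the list is empty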




items.py

# Define here the models for your scraped items
#
# See documentation in:
# https://docs.scrapy.org/en/latest/topics/items.html

import scrapy


class GemoumouItem(scrapy.Item):
    # define the fields for your item here like:
    # name = scrapy.Field()
    # Define one field for every value parsed out in the spider
    title = scrapy.Field()
    content = scrapy.Field()



Run the spider: scrapy crawl gpc

[screenshot: console output of the crawl]

Local txt file

[screenshot: contents of 基于管道存储.txt]

Database

[screenshot: rows inserted into the duanziwang table]

Original post: https://www.cnblogs.com/gemoumou/p/13635330.html