python scrapy - whole-site data crawling - CrawlSpider

Extracting only the URLs that match a regular expression
import scrapy
from scrapy.linkextractors import LinkExtractor
from scrapy.spiders import CrawlSpider, Rule


class SunSpider(CrawlSpider):
    name = 'sun'
    start_urls = ['http://wz.sun0769.com/political/index/politicsNewest?id=1&type=4&page=']

    # Link extractor: extracts links that match the regular expression given in allow
    # (note the escaped ? and the \d+ -- the unescaped pattern would not match the pagination URLs)
    link = LinkExtractor(allow=r'/political/index/politicsNewest\?id=1&page=\d+')

    rules = (
        # Rule: sends a request for every link the extractor finds; with follow=True the
        # extractor is also applied to those responses, so every matching pagination link is crawled
        Rule(link, callback='parse_item', follow=True),
    )

    def parse_item(self, response):
        print(response)
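
In a real project, parse_item would extract data from each paginated list page instead of just printing the response. The method below is a minimal sketch of that idea and would replace parse_item in the spider above; the XPath expressions and field names are assumptions about the list-page markup, not taken from the original post. The spider itself is run with: scrapy crawl sun

    def parse_item(self, response):
        # Hypothetical selectors -- adjust them to the actual list-page markup
        for li in response.xpath('//ul[@class="title-state-ul"]/li'):
            yield {
                'title': li.xpath('./span[3]/a/text()').get(),
                'status': li.xpath('./span[2]/text()').get(),
            }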


Extracting all URLs from the start page
import scrapy
from scrapy.linkextractors import LinkExtractor
from scrapy.spiders import CrawlSpider, Rule

class SunSpider(CrawlSpider):
    name = 'sun'
    start_urls = ['http://wz.sun0769.com/political/index/politicsNewest?id=1&type=4&page=']

    # Link extractor: an empty allow pattern matches every link on the page
    link = LinkExtractor(allow=r'')

    rules = (
        # Rule: sends a request for every extracted link; with follow=True the extractor
        # is also applied to those responses, so all reachable links get crawled
        Rule(link, callback='parse_item', follow=True),
    )

    def parse_item(self, response):
        print(response)
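
For true whole-site crawling, the two ideas above are usually combined in a single spider: one rule only follows the pagination links, while a second rule hands every detail page to a parsing callback. The sketch below illustrates that pattern; the spider name, the detail-page regex, and the XPath expressions in parse_detail are assumptions and would have to be adjusted to the real site.

from scrapy.linkextractors import LinkExtractor
from scrapy.spiders import CrawlSpider, Rule


class SunAllSpider(CrawlSpider):
    name = 'sun_all'
    start_urls = ['http://wz.sun0769.com/political/index/politicsNewest?id=1&type=4&page=']

    rules = (
        # Follow pagination links so every list page gets visited (no callback needed here)
        Rule(LinkExtractor(allow=r'id=1&page=\d+'), follow=True),
        # Assumed detail-page pattern; each matching page is handed to parse_detail
        Rule(LinkExtractor(allow=r'politics/index\?id=\d+'), callback='parse_detail', follow=False),
    )

    def parse_detail(self, response):
        # Hypothetical field extraction -- adjust the XPath to the real page layout
        yield {
            'title': response.xpath('//div[@class="focus-details"]/h3/text()').get(),
            'content': ''.join(response.xpath('//div[@class="details-box"]//text()').getall()).strip(),
        }

Note that CrawlSpider relies on its built-in parse method to drive the rules, so the parsing logic belongs in rule callbacks such as parse_detail rather than in an overridden parse.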


Original article: https://www.cnblogs.com/shiyi525/p/14274569.html