01-基于Redis的分布式爬虫(基于RedisCrawlSpider类)

分布式爬虫:
    1.概念:多台机器上可以执行同一爬虫程序,实现网站数据的分布式爬取。
    2.原生的scrapy 是不可以实现分布式爬虫的。
        2.1 调度器无法共享
        2.2 管道无法共享
    3. scrapy-redis组件:专门为scrapy开发的组件。实现分布式爬取
        3.1 下载:pip install scrapy-redis
    4.分布式爬取的流程:
        a. 安装Redis数据库
        b. redis配置文件的配置
        c. redis服务的开启,基于配置文件
        d. 创建scrapy工程后,创建基于crawlSpider的爬虫文件
        e. 导入 from scrapy_redis.spiders import RedisCrawlSpider
        f. 将爬虫文件中的爬虫类改为继承 RedisCrawlSpider 这个类
            class RedisqiubaiSpider(RedisCrawlSpider):
        g. 修改
            # start_urls = ['https://www.qiushibaike.com/pic/']

            # 调度器队列的名称 该行代码 跟start_urls含义一样
            redis_key = 'qiubaiSpider'
        h. 将项目的管道和调度器 配置成 基于 scrapy-redis的组件
        i. 执行爬虫文件
            scrapy runspider redisQiubai.py
        j. 在redis的客户端,将起始url 放到调度器的队列中(qiubaiSpider 是调度器队列名称)
            lpush qiubaiSpider https://www.qiushibaike.com/pic/
        k. 查询爬取结果
            lrange redisQiubai:items 0 -1

修改配置文件

# settings.py

ROBOTSTXT_OBEY = False
USER_AGENT = "Mozilla/5.0 (Windows NT 6.1; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/67.0.3396.87 Safari/537.36"
# Use the pipeline shipped with scrapy-redis so the items scraped by every
# machine are stored into the shared Redis database - this is what makes the
# pipeline shared across all crawler nodes.

ITEM_PIPELINES = {
    # 'redisPro.pipelines.RedisproPipeline': 300,
    'scrapy_redis.pipelines.RedisPipeline': 400,

}

# Use the scheduler shipped with scrapy-redis so all request URLs are stored
# in Redis - this is what makes the scheduler shared across all machines.

# scrapy-redis duplicate filter (request fingerprints kept in Redis)
DUPEFILTER_CLASS = "scrapy_redis.dupefilter.RFPDupeFilter"
# scrapy-redis scheduler (pulls requests from the shared Redis queue)
SCHEDULER = "scrapy_redis.scheduler.Scheduler"
# Keep the Redis queues when the spider closes, so a crawl can be paused/resumed
SCHEDULER_PERSIST = True

# If the Redis server is not running on this machine, configure its address:
# REDIS_HOST = 'ip address of the redis server'
# REDIS_PORT = 6379

 

# 创建新项目
scrapy startproject redisPro
cd redisPro
scrapy genspider -t crawl redisQiubai www.qiushibaike.com/pic/

 

# redisQiubai.py

# -*- coding: utf-8 -*-
import scrapy
from scrapy.linkextractors import LinkExtractor
from scrapy.spiders import CrawlSpider, Rule

from redisPro.items import RedisproItem

from scrapy_redis.spiders import RedisCrawlSpider


class RedisqiubaiSpider(RedisCrawlSpider):
    """Distributed CrawlSpider: start URLs are pulled from the Redis list
    named by ``redis_key`` instead of a local ``start_urls`` attribute."""
    name = 'redisQiubai'
    # allowed_domains = ['www.qiushibaike.com/pic/']
    # start_urls = ['https://www.qiushibaike.com/pic/']

    # Name of the scheduler queue in Redis; plays the same role as start_urls.
    redis_key = 'qiubaiSpider'

    # Follow pagination links such as /pic/page/2
    link = LinkExtractor(allow=r'/pic/page/\d+')
    rules = (
        Rule(link, callback='parse_item', follow=True),
    )

    def parse_item(self, response):
        """Extract the image URL of every post on the page and yield it
        wrapped in a RedisproItem (stored to Redis by RedisPipeline)."""
        # BUGFIX: the original XPath literal was truncated/garbled
        # ('//*[@>), leaving a syntax error. This is the standard
        # qiushibaike post-container selector — TODO confirm on live page.
        div_list = response.xpath('//*[@id="content-left"]/div')
        for div in div_list:
            src = div.xpath('.//div[@class="thumb"]/a/img/@src').extract_first()
            # Robustness: posts without a thumbnail would make the original
            # code crash on None concatenation; skip them instead.
            if not src:
                continue
            item = RedisproItem()
            item['img_url'] = "https:" + src

            yield item

 

# 切换到爬虫文件的目录
cd redisPro/redisPro/spiders/

# 执行爬虫文件
scrapy runspider redisQiubai.py

将起始url 放到 调度器队列中

爬虫(四)之分布式爬虫

查看爬取结果

爬虫(四)之分布式爬虫

02-UA池

- 作用:尽可能多的将scrapy工程中的请求伪装成不同类型的浏览器身份。

- 操作流程:

    1.在下载中间件中拦截请求
    2.将拦截到的请求的请求头信息中的UA进行篡改伪装
    3.在配置文件中开启下载中间件
# middlewares.py

# 导包
from scrapy.contrib.downloadermiddleware.useragent import UserAgentMiddleware
import random
# UA池代码的编写(单独给UA池封装一个下载中间件的一个类)
class RandomUserAgent(UserAgentMiddleware):
    """Downloader middleware that stamps a randomly chosen User-Agent
    string onto every outgoing request (a simple UA pool)."""

    def process_request(self, request, spider):
        # Pick one browser identity from the pool.
        chosen_ua = random.choice(user_agent_list)
        # Write it into the intercepted request's headers; setdefault
        # leaves any explicitly-set UA untouched.
        request.headers.setdefault('User-Agent', chosen_ua)


# Pool of desktop-browser User-Agent strings the RandomUserAgent
# middleware draws from.
user_agent_list = [
        "Mozilla/5.0 (Windows NT 6.1; WOW64) AppleWebKit/537.1 "
        "(KHTML, like Gecko) Chrome/22.0.1207.1 Safari/537.1",
        "Mozilla/5.0 (X11; CrOS i686 2268.111.0) AppleWebKit/536.11 "
        "(KHTML, like Gecko) Chrome/20.0.1132.57 Safari/536.11",
        "Mozilla/5.0 (Windows NT 6.1; WOW64) AppleWebKit/536.6 "
        "(KHTML, like Gecko) Chrome/20.0.1092.0 Safari/536.6",
        "Mozilla/5.0 (Windows NT 6.2) AppleWebKit/536.6 "
        "(KHTML, like Gecko) Chrome/20.0.1090.0 Safari/536.6",
        "Mozilla/5.0 (Windows NT 6.2; WOW64) AppleWebKit/537.1 "
        "(KHTML, like Gecko) Chrome/19.77.34.5 Safari/537.1",
        "Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/536.5 "
        "(KHTML, like Gecko) Chrome/19.0.1084.9 Safari/536.5",
        "Mozilla/5.0 (Windows NT 6.0) AppleWebKit/536.5 "
        "(KHTML, like Gecko) Chrome/19.0.1084.36 Safari/536.5",
        "Mozilla/5.0 (Windows NT 6.1; WOW64) AppleWebKit/536.3 "
        "(KHTML, like Gecko) Chrome/19.0.1063.0 Safari/536.3",
        "Mozilla/5.0 (Windows NT 5.1) AppleWebKit/536.3 "
        "(KHTML, like Gecko) Chrome/19.0.1063.0 Safari/536.3",
        "Mozilla/5.0 (Macintosh; Intel Mac OS X 10_8_0) AppleWebKit/536.3 "
        "(KHTML, like Gecko) Chrome/19.0.1063.0 Safari/536.3",
        "Mozilla/5.0 (Windows NT 6.2) AppleWebKit/536.3 "
        "(KHTML, like Gecko) Chrome/19.0.1062.0 Safari/536.3",
        "Mozilla/5.0 (Windows NT 6.1; WOW64) AppleWebKit/536.3 "
        "(KHTML, like Gecko) Chrome/19.0.1062.0 Safari/536.3",
        "Mozilla/5.0 (Windows NT 6.2) AppleWebKit/536.3 "
        "(KHTML, like Gecko) Chrome/19.0.1061.1 Safari/536.3",
        "Mozilla/5.0 (Windows NT 6.1; WOW64) AppleWebKit/536.3 "
        "(KHTML, like Gecko) Chrome/19.0.1061.1 Safari/536.3",
        "Mozilla/5.0 (Windows NT 6.1) AppleWebKit/536.3 "
        "(KHTML, like Gecko) Chrome/19.0.1061.1 Safari/536.3",
        "Mozilla/5.0 (Windows NT 6.2) AppleWebKit/536.3 "
        "(KHTML, like Gecko) Chrome/19.0.1061.0 Safari/536.3",
        "Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/535.24 "
        "(KHTML, like Gecko) Chrome/19.0.1055.1 Safari/535.24",
        "Mozilla/5.0 (Windows NT 6.2; WOW64) AppleWebKit/535.24 "
        "(KHTML, like Gecko) Chrome/19.0.1055.1 Safari/535.24"
]

03-代理池

- 作用:尽可能多的将scrapy工程中的请求的IP设置成不同的。
- 免费代理ip:http://www.goubanjia.com/
- 操作流程:

    1.在下载中间件中拦截请求
    2.将拦截到的请求的IP修改成某一代理IP
    3.在配置文件中开启下载中间件
# middlewares.py

# Swap the outgoing IP of every intercepted request.
# A dedicated downloader-middleware class for the proxy pool.
class Proxy(object):
    """Downloader middleware that sends each request through a random
    proxy whose protocol matches the scheme of the request URL."""

    def process_request(self, request, spider):
        # request.url looks like 'http://www.xxx.com'; everything before
        # the first colon is the scheme, which selects the proxy pool.
        scheme = request.url.split(':')[0]
        if scheme == 'https':
            request.meta['proxy'] = 'https://' + random.choice(PROXY_https)
        else:
            request.meta['proxy'] = 'http://' + random.choice(PROXY_http)

# Candidate proxy IPs.
PROXY_http = [
    '153.180.102.104:80',
    '195.208.131.189:56055',
]
PROXY_https = [
    '120.83.49.90:9000',
    '95.189.112.214:35508',
]

04-selenium在scrapy中的应用

selenium如何被应用到scrapy:
    a)在爬虫文件中导入webdriver类
    b)在爬虫文件的爬虫类的构造方法中进行了浏览器实例化的操作
    c)在爬虫类的closed方法中进行浏览器关闭的操作
    d)在下载中间件的process_response方法中编写执行浏览器自动化的操作    

·需求:爬取的是基于文字的新闻数据(国内,国际,军事,航空)

爬虫(四)之分布式爬虫

# -*- coding: utf-8 -*-

# Define here the models for your spider middleware
#
# See documentation in:
# https://doc.scrapy.org/en/latest/topics/spider-middleware.html

from scrapy.http import HtmlResponse
import time

'''
UA池
'''
# 导包
from scrapy.contrib.downloadermiddleware.useragent import UserAgentMiddleware
import random
# UA池代码的编写(单独给UA池封装一个下载中间件的一个类)


class RandomUserAgent(UserAgentMiddleware):
    """UA-pool downloader middleware: disguises each intercepted request
    with a random browser identity taken from ``user_agent_list``."""

    def process_request(self, request, spider):
        # Rotate the User-Agent header; setdefault keeps any UA that was
        # already set on this particular request.
        request.headers.setdefault('User-Agent',
                                   random.choice(user_agent_list))


# Pool of desktop-browser User-Agent strings the RandomUserAgent
# middleware draws from.
user_agent_list = [
        "Mozilla/5.0 (Windows NT 6.1; WOW64) AppleWebKit/537.1 "
        "(KHTML, like Gecko) Chrome/22.0.1207.1 Safari/537.1",
        "Mozilla/5.0 (X11; CrOS i686 2268.111.0) AppleWebKit/536.11 "
        "(KHTML, like Gecko) Chrome/20.0.1132.57 Safari/536.11",
        "Mozilla/5.0 (Windows NT 6.1; WOW64) AppleWebKit/536.6 "
        "(KHTML, like Gecko) Chrome/20.0.1092.0 Safari/536.6",
        "Mozilla/5.0 (Windows NT 6.2) AppleWebKit/536.6 "
        "(KHTML, like Gecko) Chrome/20.0.1090.0 Safari/536.6",
        "Mozilla/5.0 (Windows NT 6.2; WOW64) AppleWebKit/537.1 "
        "(KHTML, like Gecko) Chrome/19.77.34.5 Safari/537.1",
        "Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/536.5 "
        "(KHTML, like Gecko) Chrome/19.0.1084.9 Safari/536.5",
        "Mozilla/5.0 (Windows NT 6.0) AppleWebKit/536.5 "
        "(KHTML, like Gecko) Chrome/19.0.1084.36 Safari/536.5",
        "Mozilla/5.0 (Windows NT 6.1; WOW64) AppleWebKit/536.3 "
        "(KHTML, like Gecko) Chrome/19.0.1063.0 Safari/536.3",
        "Mozilla/5.0 (Windows NT 5.1) AppleWebKit/536.3 "
        "(KHTML, like Gecko) Chrome/19.0.1063.0 Safari/536.3",
        "Mozilla/5.0 (Macintosh; Intel Mac OS X 10_8_0) AppleWebKit/536.3 "
        "(KHTML, like Gecko) Chrome/19.0.1063.0 Safari/536.3",
        "Mozilla/5.0 (Windows NT 6.2) AppleWebKit/536.3 "
        "(KHTML, like Gecko) Chrome/19.0.1062.0 Safari/536.3",
        "Mozilla/5.0 (Windows NT 6.1; WOW64) AppleWebKit/536.3 "
        "(KHTML, like Gecko) Chrome/19.0.1062.0 Safari/536.3",
        "Mozilla/5.0 (Windows NT 6.2) AppleWebKit/536.3 "
        "(KHTML, like Gecko) Chrome/19.0.1061.1 Safari/536.3",
        "Mozilla/5.0 (Windows NT 6.1; WOW64) AppleWebKit/536.3 "
        "(KHTML, like Gecko) Chrome/19.0.1061.1 Safari/536.3",
        "Mozilla/5.0 (Windows NT 6.1) AppleWebKit/536.3 "
        "(KHTML, like Gecko) Chrome/19.0.1061.1 Safari/536.3",
        "Mozilla/5.0 (Windows NT 6.2) AppleWebKit/536.3 "
        "(KHTML, like Gecko) Chrome/19.0.1061.0 Safari/536.3",
        "Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/535.24 "
        "(KHTML, like Gecko) Chrome/19.0.1055.1 Safari/535.24",
        "Mozilla/5.0 (Windows NT 6.2; WOW64) AppleWebKit/535.24 "
        "(KHTML, like Gecko) Chrome/19.0.1055.1 Safari/535.24"
]


class WangyiproDownloaderMiddleware(object):
    """Downloader middleware that renders the four 163.com news-section
    index pages with the spider's selenium browser so their dynamically
    loaded content is visible to the parsing callbacks."""
    # Not all methods need to be defined. If a method is not defined,
    # scrapy acts as if the downloader middleware does not modify the
    # passed objects.

    def process_request(self, request, spider):
        # No request-side processing: returning None tells scrapy to keep
        # handling this request normally.
        return None

    def process_response(self, request, response, spider):
        """Intercept responses flowing from the downloader to the spider.

        request  -- the request this response belongs to
        response -- the intercepted response
        spider   -- the spider instance; must expose a selenium driver
                    as ``spider.bro`` (created in the spider's __init__)

        For the four section index pages the original body is replaced by
        the selenium-rendered page source; everything else passes through.
        """
        if request.url in ['http://news.163.com/domestic/', 'http://news.163.com/air/', 'http://war.163.com/', 'http://news.163.com/world/']:
            spider.bro.get(url=request.url)

            # BUGFIX: the original script said 'windows.scrollTo' — the
            # browser global is 'window', so the scroll raised a JS
            # ReferenceError (WebDriverException) instead of scrolling.
            js = 'window.scrollTo(0, document.body.scrollHeight)'
            spider.bro.execute_script(js)
            # Give the page time to lazy-load the content revealed by the
            # scroll before grabbing the source.
            time.sleep(2)
            # page_text now contains the dynamically loaded page data.
            page_text = spider.bro.page_source
            # current_url: the URL the browser actually ended up on;
            # body: the rendered HTML handed back to the spider.
            return HtmlResponse(url=spider.bro.current_url, body=page_text, encoding='utf-8', request=request)
        else:
            return response


'''
代理池
'''
# Swap the IP of each intercepted request for one drawn from a pool.
# Implemented as its own downloader-middleware class.


class Proxy(object):
    """Proxy-pool downloader middleware: routes every intercepted request
    through a randomly selected proxy that matches the URL scheme."""

    def process_request(self, request, spider):
        # Decide which pool applies from the request's protocol
        # (request.url returns e.g. 'http://www.xxx.com').
        if request.url.split(':')[0] == 'https':
            pool, prefix = PROXY_https, 'https://'
        else:
            pool, prefix = PROXY_http, 'http://'
        request.meta['proxy'] = prefix + random.choice(pool)

# Candidate proxy IPs.
PROXY_http = [
    '153.180.102.104:80',
    '195.208.131.189:56055',
]
PROXY_https = [
    '120.83.49.90:9000',
    '95.189.112.214:35508',
]

middlewares.py
middlewares.py

相关文章:

  • 2021-09-28
  • 2021-06-05
猜你喜欢
  • 2021-10-01
  • 2021-09-03
  • 2021-08-28
  • 2022-12-23
  • 2022-02-06
  • 2021-05-12
相关资源
相似解决方案