集群-架构

2台虚拟机

[root@linux-node1 ~]# rpm -ivh http://mirrors.aliyun.com/epel/epel-release-latest-7.noarch.rpm

[root@linux-node1 ~]# yum install -y net-tools vim lrzsz tree screen lsof tcpdump nc mtr nmap gcc glibc gcc-c++ make haproxy keepalived nginx  psmisc

vim /etc/haproxy/haproxy.cfg

global
maxconn 100000
chroot /var/lib/haproxy
user haproxy
group haproxy
daemon
nbproc 1
pidfile /usr/local/haproxy/logs/haproxy.pid
log 127.0.0.1 local3 info

defaults
option http-keep-alive
maxconn 100000
mode http
timeout connect 5000ms
timeout client  50000ms
timeout server 50000ms

listen stats
mode http
bind 0.0.0.0:8888
stats enable
stats uri     /haproxy-status
stats auth    haproxy:saltstack

frontend frontend_www_example_com
bind 10.240.17.100:80
mode http
option httplog
log global
    default_backend backend_www_example_com

backend backend_www_example_com
option forwardfor header X-REAL-IP
option httpchk HEAD / HTTP/1.0
balance source
server web-node1  10.240.17.100:8080 check inter 2000 rise 30 fall 15
server web-node2  10.240.17.103:8080 check inter 2000 rise 30 fall 15

[root@node1 www]# systemctl restart haproxy

[root@node1 www]# mkdir /data/www/www -p

[root@node1 www]# echo "node1" > /data/www/www/index.html

[root@node1 www]# vim /data/ops/app/tengine-2.1.0/conf/vhost.default.conf 

server {
        listen 8080;
        location / {
            root /data/www/www;
            index index.html index.htm;
        }
        error_log    logs/error_www.abc.com.log error;
        access_log    logs/access_www.abc.com.log    main;
}

[root@node2 www]# systemctl restart haproxy

[root@node2 www]# mkdir /data/www/www -p

[root@node2 www]# echo "node2" > /data/www/www/index.html

[root@node2 www]# vim /data/ops/app/tengine-2.1.0/conf/vhost.default.conf 

server {
        listen 8080;
        location / {
            root /data/www/www;
            index index.html index.htm;
        }
        error_log    logs/error_www.abc.com.log error;
        access_log    logs/access_www.abc.com.log    main;
}

用户访问时发现请求一直落在 node2 上,因为 haproxy 配置文件中定义了 balance source,即根据源 IP 哈希算法调度;如果想改为轮询,把 balance source 改成 balance roundrobin 即可

yum install keepalived

vim /etc/keepalived/keepalived.conf

! Configuration File for keepalived
global_defs {
   notification_email {
     saltstack@example.com
   }
   notification_email_from keepalived@example.com
   smtp_server 127.0.0.1
   smtp_connect_timeout 30
   router_id haproxy_node1
}

vrrp_instance haproxy_ha {
state MASTER
interface em1
    virtual_router_id 36
priority 150
    advert_int 1
authentication {
auth_type PASS
        auth_pass 1111
    }
    virtual_ipaddress {
       10.240.17.222
    }
}

[root@node1 ~]# echo 1 > /proc/sys/net/ipv4/ip_nonlocal_bind 

[root@node1 ~]# vim /etc/haproxy/haproxy.cfg 

bind 10.240.17.222:80

[root@node1 ~]# systemctl start keepalived

[root@node1 ~]# systemctl reload haproxy

###

[root@node2 ~]# yum install keepalived -y 

! Configuration File for keepalived
global_defs {
   notification_email {
     saltstack@example.com
   }
   notification_email_from keepalived@example.com
   smtp_server 127.0.0.1
   smtp_connect_timeout 30
   router_id haproxy_node2
}

vrrp_instance haproxy_ha {
state BACKUP
interface em1
    virtual_router_id 36
priority 100
    advert_int 1
authentication {
auth_type PASS
        auth_pass 1111
    }
    virtual_ipaddress {
       10.240.17.222
    }
}

[root@node2 ~]# vim /etc/haproxy/haproxy.cfg 

bind 10.240.17.222:80

[root@node2 ~]# echo 1 >  /proc/sys/net/ipv4/ip_nonlocal_bind

[root@node2 ~]# /etc/init.d/haproxy restart

[root@node2 ~]# /etc/init.d/keepalived restart

然后用浏览器访问 VIP,停掉 node1 上的 keepalived 后,发现虚拟 IP 漂移到了 node2;如果要在 haproxy 进程故障时也能自动切换,需要在 keepalived 中配置检查脚本(vrrp_script)来检测 haproxy 状态

###

灾难恢复    根据信息系统灾难恢复规范来定义级别。

1.核心业务,非核心业务

2.从重要数据到非重要数据

3.从下往上

4.灾备演练

5.徘徊在冷备和双活之间

原文地址:https://www.cnblogs.com/zhaobin-diray/p/9175149.html