Distributed Block Device (DRBD) Deployment

1. Distributed Block Device (DRBD) Deployment

Official documentation: https://linbit.com/drbd-user-guide/drbd-guide-9_0-cn/#ch-pacemaker

1.1 Basic environment initialization
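
The original leaves this section empty. Below is a minimal sketch of the initialization typically needed before the DRBD steps, assuming the two CentOS 7 nodes node1 (192.168.10.30) and node2 (192.168.10.40) used throughout this guide; the spare data disk /dev/sdb is an assumption, so adjust host names, addresses and the disk to your environment.

    # Run on both nodes (shown for node1); a sketch, not part of the original article
    [root@node1 ~]# hostnamectl set-hostname node1
    [root@node1 ~]# cat >> /etc/hosts <<EOF
    192.168.10.30 node1
    192.168.10.40 node2
    EOF
    # Disable the firewall and SELinux for this lab setup
    [root@node1 ~]# systemctl stop firewalld && systemctl disable firewalld
    [root@node1 ~]# setenforce 0
    [root@node1 ~]# sed -i 's/^SELINUX=enforcing/SELINUX=permissive/' /etc/selinux/config
    # Keep time in sync
    [root@node1 ~]# yum install chrony -y
    [root@node1 ~]# systemctl enable chronyd && systemctl start chronyd
    # Create the partition that will back DRBD (assumes the spare disk /dev/sdb)
    [root@node1 ~]# parted -s /dev/sdb mklabel gpt
    [root@node1 ~]# parted -s /dev/sdb mkpart primary 1MiB 100%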

1.2 Deploy the DRBD environment

The following steps need to be run on both the primary and the secondary server.

    # Update the system kernel and reboot the server
    [root@node1 ~]# yum install kernel-devel kernel -y
    [root@node1 ~]# reboot

    # Install DRBD
    [root@node1 software]# tar zxf drbd-9.0.18-1.tar.gz
    [root@node1 drbd-9.0.18-1]# make KDIR=/usr/src/kernels/3.10.0-957.21.3.el7.x86_64/
    [root@node1 drbd-9.0.18-1]# make install
    [root@node1 software]# yum install drbd90-utils-9.6.0-1.el7.elrepo.x86_64.rpm -y
    [root@node1 software]# yum install drbd90-utils-sysvinit-9.6.0-1.el7.elrepo.x86_64.rpm -y
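
After the build it is worth confirming that the DRBD 9 kernel module was actually built against the running kernel and can be loaded. A quick check (not part of the original article) could look like this:

    # Load the freshly built module and confirm its version (sketch)
    [root@node1 drbd-9.0.18-1]# modprobe drbd
    [root@node1 drbd-9.0.18-1]# modinfo drbd | grep -E '^(filename|version)'
    [root@node1 drbd-9.0.18-1]# cat /proc/drbd    # should report version 9.0.18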

1.3 Create the DRBD resource file

    [root@node1 software]# vim /etc/drbd.d/data.res
    resource data {
        on node1 {                        # "on" is followed by the host name
            device    /dev/drbd1;         # the DRBD device the resource is exposed as; later steps use /dev/drbd1
            disk      /dev/sdb1;          # the backing disk that will store the data
            address   192.168.10.30:7789;
            meta-disk internal;
        }
        on node2 {
            device    /dev/drbd1;
            disk      /dev/sdb1;
            address   192.168.10.40:7789;
            meta-disk internal;
        }
    }
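
The resource definition has to be identical on both nodes, and the article only shows node1. A sketch of distributing and sanity-checking it (scp and drbdadm dump are standard tools; the paths are the ones used above):

    # Copy the resource definition to node2 and check the syntax
    [root@node1 software]# scp /etc/drbd.d/data.res root@node2:/etc/drbd.d/
    [root@node1 software]# drbdadm dump data    # prints the parsed resource if the config is valid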

1.4 Modify the DRBD global and common configuration file

    [root@node1 ~]# vim /etc/drbd.d/global_common.conf
    global {
        usage-count yes;
    }
    common {
        handlers {
        }
        startup {
        }
        options {
            # Automatically promote to Primary when the block device is mounted
            # and demote back to Secondary when it is unmounted
            auto-promote yes;
        }

        disk {
        }

        net {
            # Use protocol C to guarantee synchronous replication
            protocol C;
        }
    }
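
This file must also match on the peer. A short sketch of distributing it and re-checking the full parsed configuration:

    [root@node1 ~]# scp /etc/drbd.d/global_common.conf root@node2:/etc/drbd.d/
    [root@node1 ~]# drbdadm dump all    # shows the effective, merged configuration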

1.5 Start the DRBD service

[root@node1 ~]# systemctl start drbd
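
The service needs to run on the peer as well before the two sides can connect; a one-line sketch for the second node:

    [root@node2 ~]# systemctl start drbd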

1.6 Create the DRBD metadata on the disk partition

    # If the partition used for DRBD storage already contains a file system
    # (for example, mkfs.xfs /dev/sdb1 was run on it before), creating the DRBD
    # metadata will fail. In that case wipe the old file system first:
    [root@node1 ~]# dd if=/dev/zero of=/dev/sdb1 bs=1M count=100

    # Create the metadata
    [root@node1 ~]# drbdadm create-md data
    --== Thank you for participating in the global usage survey ==--
    The server's response is:
    initializing activity log
    initializing bitmap (320 KB) to all zero
    Writing meta data...
    New drbd meta data block successfully created.

    # Check the DRBD status: node1 and node2 are both Secondary and the disks
    # are Inconsistent (data not yet synchronized)
    [root@node1 ~]# drbdadm status data
    data role:Secondary
      disk:Inconsistent
      node2 role:Secondary
        peer-disk:Inconsistent
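
The metadata has to be created on node2 as well (the article only shows node1); otherwise the peer has no attached disk. A sketch using the same commands on the second node:

    [root@node2 ~]# dd if=/dev/zero of=/dev/sdb1 bs=1M count=100   # only if /dev/sdb1 held an old file system
    [root@node2 ~]# drbdadm create-md data
    [root@node2 ~]# drbdadm up data    # attach the disk and connect to node1 if the resource is not already up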

1.7 Set the DRBD primary node and format the file system

    # A primary node has to be set manually the first time; afterwards the
    # primary/secondary roles switch automatically on mount/umount (auto-promote)
    [root@node1 ~]# drbdadm primary --force data

    # Create the file system
    [root@node1 ~]# mkfs.xfs /dev/drbd1
    meta-data=/dev/drbd1             isize=512    agcount=4, agsize=655210 blks
             =                       sectsz=512   attr=2, projid32bit=1
             =                       crc=1        finobt=0, sparse=0
    data     =                       bsize=4096   blocks=2620839, imaxpct=25
             =                       sunit=0      swidth=0 blks
    naming   =version 2              bsize=4096   ascii-ci=0 ftype=1
    log      =internal log           bsize=4096   blocks=2560, version=2
             =                       sectsz=512   sunit=0 blks, lazy-count=1
    realtime =none

    # Mount it on a local directory
    [root@node1 ~]# mkdir /mydata
    [root@node1 ~]# mount /dev/drbd1 /mydata/
    [root@node1 ~]# df -h
    Filesystem               Size  Used Avail Use% Mounted on
    /dev/mapper/centos-root  8.0G  1.5G  6.6G  18% /
    devtmpfs                 475M     0  475M   0% /dev
    tmpfs                    487M     0  487M   0% /dev/shm
    tmpfs                    487M  7.6M  479M   2% /run
    tmpfs                    487M     0  487M   0% /sys/fs/cgroup
    /dev/sda1               1014M  156M  859M  16% /boot
    tmpfs                     98M     0   98M   0% /run/user/0
    /dev/drbd1                10G   33M   10G   1% /mydata
    [root@node1 ~]# drbdadm status data
    data role:Primary
      disk:UpToDate
      node2 role:Secondary
        peer-disk:UpToDate
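
The failover test in the next step expects some files under /mydata. A sketch of writing a few test files while node1 is still Primary (the names a, b, c match the `ls` output shown later):

    [root@node1 ~]# touch /mydata/a /mydata/b /mydata/c
    [root@node1 ~]# ls /mydata/
    a  b  c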

1.8 Primary/secondary failover test

    # After writing files under /mydata/, unmount the device on the primary and
    # mount it on the secondary, then check on the secondary that the files
    # have been synchronized
    [root@node1 ~]# umount /mydata/
    [root@node1 ~]# drbdadm secondary data
    [root@node1 ~]# drbdadm status data
    data role:Secondary
      disk:UpToDate
      node2 role:Secondary
        peer-disk:UpToDate

    [root@node2 ~]# mkdir /mydata
    [root@node2 ~]# mount /dev/drbd1 /mydata/
    [root@node2 ~]# drbdadm status data
    data role:Primary
      disk:UpToDate
      node1 role:Secondary
        peer-disk:UpToDate

    [root@node2 ~]# ls /mydata/
    a  b  c
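
Before handing control over to Pacemaker in the next chapter, the device should be unmounted again so that both nodes return to the Secondary role (with auto-promote, unmounting is enough on DRBD 9, but an explicit demote does no harm). A sketch:

    [root@node2 ~]# umount /mydata/
    [root@node2 ~]# drbdadm secondary data
    [root@node2 ~]# drbdadm status data    # both nodes should now show role:Secondary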

2. Deploy the High-Availability Components Pacemaker + Corosync

2.1 Install the required components

    # On both nodes, add the yum repository for the crm management tool
    [root@node1 ~]# vim /etc/yum.repos.d/crmsh.repo
    [network_ha-clustering_Stable]
    name=Stable High Availability/Clustering packages (CentOS_CentOS-7)
    type=rpm-md
    baseurl=http://download.opensuse.org/repositories/network:/ha-clustering:/Stable/CentOS_CentOS-7/
    gpgcheck=1
    gpgkey=http://download.opensuse.org/repositories/network:/ha-clustering:/Stable/CentOS_CentOS-7/repodata/repomd.xml.key
    enabled=1

    # Install the crmsh management tool together with pacemaker and corosync
    [root@node1 ~]# yum install crmsh pacemaker corosync
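
The repository file and the packages are needed on node2 as well; a quick sketch:

    [root@node1 ~]# scp /etc/yum.repos.d/crmsh.repo root@node2:/etc/yum.repos.d/
    [root@node2 ~]# yum install crmsh pacemaker corosync
    [root@node2 ~]# rpm -q crmsh pacemaker corosync    # confirm the installed versions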

2.2 Configure Corosync

    [root@node1 ~]# cd /etc/corosync/
    [root@node1 corosync]# cp corosync.conf.example corosync.conf
    [root@node1 corosync]# vim corosync.conf
    totem {
        version: 2

        crypto_cipher: aes256
        crypto_hash: sha1

        interface {
            ringnumber: 0
            bindnetaddr: 192.168.10.30
            mcastaddr: 239.255.1.1
            mcastport: 5405
            ttl: 1
        }
    }

    logging {
        fileline: off
        to_stderr: no
        to_logfile: yes
        logfile: /var/log/cluster/corosync.log
        to_syslog: yes
        debug: off
        timestamp: on
        logger_subsys {
            subsys: QUORUM
            debug: off
        }
    }

    quorum {
        provider: corosync_votequorum
    }

    nodelist {
        node {
            ring0_addr: node1
            nodeid: 1
        }
        node {
            ring0_addr: node2
            nodeid: 2
        }
    }

    # Generate the corosync authentication key
    [root@node1 corosync]# corosync-keygen
    [root@node1 corosync]# scp authkey root@node2:/etc/corosync/
    [root@node1 corosync]# scp corosync.conf root@node2:/etc/corosync/

    # Start corosync and pacemaker
    [root@node1 ~]# systemctl start corosync
    [root@node1 ~]# systemctl start pacemaker

    # Check the cluster status
    [root@node1 corosync]# crm_mon
    Stack: corosync
    Current DC: node1 (version 1.1.19-8.el7_6.4-c3c624ea3d) - partition with quorum
    Last updated: Fri Jul 5 21:48:22 2019
    Last change: Fri Jul 5 21:45:52 2019 by hacluster via crmd on node1

    2 nodes configured
    0 resources configured

    Online: [ node1 node2 ]

    No active resources
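
crm_mon already shows both nodes online, which implies the services were started on node2 too; it also helps to enable the stack at boot so it survives a reboot. A sketch:

    [root@node2 ~]# systemctl start corosync
    [root@node2 ~]# systemctl start pacemaker
    # On both nodes, enable the services at boot
    [root@node1 ~]# systemctl enable corosync pacemaker
    [root@node2 ~]# systemctl enable corosync pacemaker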

2.3 Disable the STONITH device

    [root@node1 ~]# crm
    crm(live)# configure
    crm(live)configure# show
    node 1: node1
    node 2: node2
    property cib-bootstrap-options: \
        have-watchdog=false \
        dc-version=1.1.19-8.el7_6.4-c3c624ea3d \
        cluster-infrastructure=corosync
    crm(live)configure# property stonith-enabled=false
    crm(live)configure# property no-quorum-policy=ignore
    crm(live)configure# property start-failure-is-fatal=false
    crm(live)configure# property default-action-timeout=180s
    crm(live)configure# rsc_defaults resource-stickiness=100
    crm(live)configure# verify
    crm(live)configure# commit
    crm(live)configure# show
    node 1: node1
    node 2: node2
    property cib-bootstrap-options: \
        have-watchdog=false \
        dc-version=1.1.19-8.el7_6.4-c3c624ea3d \
        cluster-infrastructure=corosync \
        stonith-enabled=false \
        no-quorum-policy=ignore \
        start-failure-is-fatal=false \
        default-action-timeout=180s
    rsc_defaults rsc-options: \
        resource-stickiness=100

2.4 Add the Virtual IP resource

    crm(live)configure# primitive vip ocf:heartbeat:IPaddr2 params ip=192.168.10.50 op monitor interval=30s
    crm(live)configure# commit
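
After the commit, the VIP should be started on one of the nodes and plumbed onto its interface. A quick check (a sketch, not from the original article):

    [root@node1 ~]# crm status                    # the vip resource should be Started on one node
    [root@node1 ~]# ip addr | grep 192.168.10.50  # run on the node where vip is started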

2.5 Add the DRBD disk resource

Enable the DRBD service to start at boot, because the cluster only takes over mounting the DRBD device rather than starting DRBD itself; the DRBD resource must therefore be in the Secondary role on both nodes before the cluster manages it (see the short sketch below).
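
A minimal sketch of those two prerequisites, assuming systemd manages the drbd service as in section 1.5:

    # Enable DRBD at boot on both nodes and make sure nothing is mounted or promoted
    [root@node1 ~]# systemctl enable drbd
    [root@node2 ~]# systemctl enable drbd
    [root@node1 ~]# drbdadm status data    # both nodes should report role:Secondary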

    crm(live)configure# primitive drbd ocf:heartbeat:Filesystem params device=/dev/drbd1 directory=/mydata fstype=xfs
    crm(live)configure# commit
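
Once committed, Pacemaker should mount /dev/drbd1 on whichever node runs the resource, and auto-promote takes care of the DRBD role. A hedged check:

    [root@node1 ~]# crm status             # the drbd Filesystem resource should be Started
    [root@node1 ~]# df -h /mydata          # run on the node where the resource is started
    [root@node1 ~]# drbdadm status data    # that node should now be Primary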

2.6 Group the VIP and DRBD resources and start the VIP before DRBD

Use the group command to group the two resources; resources in a group are started in the order they are listed, so the VIP is started before the DRBD file system.

crm(live)configure# group vip_drbd vip drbd
crm(live)configure# commit
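
With the group in place, a simple failover test is to put the active node into standby and watch both resources (VIP and DRBD mount) move to the other node; `crm node standby` and `crm node online` are standard crmsh commands, the rest is a sketch:

    # Assume vip and drbd currently run on node1
    [root@node1 ~]# crm node standby node1
    [root@node1 ~]# crm status               # vip_drbd should now be Started on node2
    [root@node2 ~]# df -h /mydata            # /dev/drbd1 mounted on node2
    [root@node1 ~]# crm node online node1    # bring node1 back; resource-stickiness=100 keeps the group on node2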