Setting Up DRBD-Based iSCSI-Target High Availability on Ubuntu

Lab environment:

  1. OS: Ubuntu 20.04 Server
  2. Node hosts; /dev/sdb is the newly added disk on each node
Hostname    IP address
node1       172.16.0.31/24
node2       172.16.0.32/24
# hostnamectl output on node2:
   Static hostname: node2
         Icon name: computer-vm
           Chassis: vm
        Machine ID: cb06f418971e416bbca55d643cd37974
           Boot ID: bc1cd664405d411a85eeae0779633db3
    Virtualization: vmware
  Operating System: Ubuntu 20.04.3 LTS
            Kernel: Linux 5.4.0-91-generic
      Architecture: x86-64

Lab steps:

  1. Configure the network and hostname
# node1: 172.16.0.31/24  node2: 172.16.0.32/24
# Configure the IP address (node1 shown; adjust for node2)
nmcli con mod ens160 ipv4.method manual ipv4.addresses 172.16.0.31/24 ipv4.gateway 172.16.0.253 ipv4.dns 172.16.0.2,172.16.0.3 connection.autoconnect yes
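Note: a stock Ubuntu 20.04 Server install manages the network with netplan/systemd-networkd, so the nmcli command above only works if NetworkManager owns ens160. A rough netplan equivalent for node1 (the file name is an assumption) would be:
# /etc/netplan/00-node1.yaml, applied with: netplan apply
network:
  version: 2
  ethernets:
    ens160:
      addresses: [172.16.0.31/24]
      gateway4: 172.16.0.253
      nameservers:
        addresses: [172.16.0.2, 172.16.0.3]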
# Set the hostname
hostnamectl set-hostname node1
# Add name resolution entries for the cluster nodes
vi /etc/hosts
# Cluster Nodes
172.16.0.31 node1
172.16.0.32 node2
# Create the LVM physical volume
pvcreate /dev/sdb
# Create the volume group
vgcreate drbd-vg /dev/sdb
# Create the logical volume that will back the DRBD device
lvcreate -L 1G -n r0 drbd-vg
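The new volume shows up as /dev/drbd-vg/r0, or equivalently /dev/mapper/drbd--vg-r0 (device-mapper doubles the dash in the VG name); the latter path is what the DRBD resource file below refers to. A quick check:
lvs drbd-vg
ls -l /dev/mapper/drbd--vg-r0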
# Keep multipath from claiming the DRBD device
vi /etc/multipath.conf
# Add the blacklist rule
blacklist {
    devnode "^drbd[0-9]"
}
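After saving the blacklist, restart multipathd so the rule takes effect (assuming multipath-tools is installed and active):
systemctl restart multipathd.service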
# Install the iSCSI target service
apt install tgt
# Disable the firewall
ufw disable
# If the firewall stays enabled, open the required ports instead
# DRBD replication port
ufw allow 7790/tcp
# corosync port
ufw allow 5405/udp
# iSCSI-Target
ufw allow iscsi-target
ufw reload
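For reference, the iscsi-target name above resolves to 3260/tcp via /etc/services, so an explicit rule plus a quick check would be:
ufw allow 3260/tcp
ufw status verbose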
  2. Install and configure DRBD
# Install DRBD
apt install drbd-utils
# Load the kernel module
modprobe drbd
# Verify that the module is loaded
lsmod | grep drbd
# Load the module automatically at boot
echo drbd >> /etc/modules-load.d/modules.conf
# Edit the global configuration
cd /etc/drbd.d/
vi global_common.conf

# DRBD is the result of over a decade of development by LINBIT.
# In case you need professional services for DRBD or have
# feature requests visit http://www.linbit.com

global {
	usage-count no;
	udev-always-use-vnr;
}
common {
	# Handlers for split-brain and degraded states
	handlers {
		pri-on-incon-degr "/usr/lib/drbd/notify-pri-on-incon-degr.sh; /usr/lib/drbd/notify-emergency-reboot.sh; echo b > /proc/sysrq-trigger ; reboot -f";
		pri-lost-after-sb "/usr/lib/drbd/notify-pri-lost-after-sb.sh; /usr/lib/drbd/notify-emergency-reboot.sh; echo b > /proc/sysrq-trigger ; reboot -f";
		local-io-error "/usr/lib/drbd/notify-io-error.sh; /usr/lib/drbd/notify-emergency-shutdown.sh; echo o > /proc/sysrq-trigger ; halt -f";
		fence-peer "/usr/lib/drbd/crm-fence-peer.sh";
	}
	disk {
		# Detach the disk on I/O errors
		on-io-error detach;
		fencing resource-only;
	}
	net {
		protocol C;
	}
}
# Create resource r0
vi r0.res
resource "r0" {
        # DRBD device
        device  "/dev/drbd0";
        # Backing storage (the LVM volume created earlier)
        disk    "/dev/mapper/drbd--vg-r0";
        meta-disk internal;
        # Node 1
        on "node1" {
                # Replication address
                address 172.16.0.31:7790;
        }
        on "node2" {
                address 172.16.0.32:7790;
        }
}
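The resource file alone does not start replication. The usual initialization, sketched below, is to create the metadata and bring the resource up on both nodes, then force the first Primary on one node only (these commands are not part of the original write-up):
# On both nodes
drbdadm create-md r0
drbdadm up r0
# On node1 only: make it the initial sync source
drbdadm primary --force r0
# Watch the initial sync
cat /proc/drbd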
  3. Install and configure Corosync and Pacemaker
# Install pacemaker and crmsh (corosync is pulled in as a dependency; the crm commands below need crmsh)
apt install pacemaker crmsh
# Edit corosync.conf
vi /etc/corosync/corosync.conf

totem {
        version: 2
        secauth: off
        cluster_name: mycluster
        transport: udpu
}

nodelist {
        node {
                name: node1
                ring0_addr: 172.16.0.31
                nodeid: 1
        }
        node {
                name: node2
                ring0_addr: 172.16.0.32
                nodeid: 2
        }
}

quorum {
        provider: corosync_votequorum
        two_node: 1
        wait_for_all: 1
        last_man_standing: 1
        auto_tie_breaker: 0
}
# Restart the services
systemctl restart corosync.service pacemaker.service
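A quick sanity check after the restart (corosync-cfgtool ships with corosync; crm comes from the crmsh package installed above):
# Ring status on the local node
corosync-cfgtool -s
# Both nodes should report Online
crm status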
  4. Configure the iSCSI cluster resources
# Cluster-wide settings
# Disable STONITH fencing (no fencing devices in this lab setup)
crm configure property stonith-enabled=false
# Keep resources running when quorum is lost (two-node cluster)
crm configure property no-quorum-policy=ignore
# Resource stickiness (a resource default, not a cluster property)
crm configure rsc_defaults resource-stickiness=200
# Move a resource to the other node after a single failure
crm configure rsc_defaults migration-threshold=1

# Create the DRBD resource primitive
primitive p_drbd_res ocf:linbit:drbd params drbd_resource="r0" op monitor timeout="20" interval="20" role="Slave" op monitor timeout="20" interval="10" role="Master"
# Master/Slave (promotable clone) wrapper for the DRBD resource
ms ms_drbd_res p_drbd_res meta master-max="1" master-node-max="1" clone-max="2" clone-node-max="1" notify="true" interleave="true"
# Virtual IP
primitive p_ip ocf:heartbeat:IPaddr2 params ip="172.16.0.30" cidr_netmask="24" op monitor timeout="20s" interval="10s" depth="0"
# iSCSI-Target
primitive r0_target ocf:heartbeat:iSCSITarget params implementation="tgt" iqn="iqn.2021-12.drbd.stroage:target.ha" tid="1" allowed_initiators="ALL" op monitor timeout="10s" interval="10s" depth="0"
# iSCSI-Target-Lun
primitive r0_target_lun1 ocf:heartbeat:iSCSILogicalUnit params implementation="tgt" target_iqn="iqn.2021-12.drbd.stroage:target.ha" lun="1" path="/dev/drbd0" op monitor timeout="10s" interval="10s" depth="0"
# Create the resource group
group r0_group r0_target r0_target_lun1 p_ip
# Ordering and colocation: promote DRBD first, and keep the group on the DRBD Master
order o_drbd_before_p_drbd_res inf: ms_drbd_res:promote r0_group:start
colocation c_r0_on_drbd inf: r0_group ms_drbd_res:Master
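Before moving on to testing, it is worth validating and reviewing what was just loaded into the CIB:
# Syntax/semantic check of the current configuration
crm configure verify
# Review the configuration and the resulting resource placement
crm configure show
crm status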
  5. Test the cluster

# After taking node1 offline, pinging the VIP loses only a single packet; failover succeeds
root@node2:/etc/drbd.d# crm status
Cluster Summary:
  * Stack: corosync
  * Current DC: node2 (version 2.0.3-4b1f869f0f) - partition with quorum
  * Last updated: Fri Dec 24 13:12:25 2021
  * Last change:  Fri Dec 24 13:11:37 2021 by root via cibadmin on node2
  * 2 nodes configured
  * 5 resource instances configured

Node List:
  * Online: [ node2 ]
  * OFFLINE: [ node1 ]

Full List of Resources:
  * Clone Set: ms_drbd_res [p_drbd_res] (promotable):
    * Masters: [ node2 ]
    * Stopped: [ node1 ]
  * Resource Group: r0_group:
    * r0_target (ocf::heartbeat:iSCSITarget):    Started node2
    * r0_target_lun1    (ocf::heartbeat:iSCSILogicalUnit):       Started node2
    * p_ip      (ocf::heartbeat:IPaddr2):        Started node2
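A gentler failover test, plus a client-side check from a separate initiator host with open-iscsi installed, might look like the sketch below; the initiator commands are an assumption, not part of the original test:
# Controlled failover: put node1 in standby, then bring it back
crm node standby node1
crm node online node1
# From an iSCSI initiator, discover and log in via the virtual IP
iscsiadm -m discovery -t sendtargets -p 172.16.0.30
iscsiadm -m node -T iqn.2021-12.drbd.stroage:target.ha -p 172.16.0.30 --login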
  6. References
# https://linux.die.net/man/5/targets.conf
# http://crmsh.github.io/man-2.0/
# http://www.interbit.com.pl/wp-content/uploads/2013/07/ha-iscsi.pdf
# https://linbit.com/drbd-user-guide/drbd-guide-9_0-cn/
# https://www.mankier.com/package/resource-agents
# https://www.mankier.com/7/ocf_linbit_drbd