MFS High Availability (pcs cluster management and fence-based automatic power-cycle reboot)

server1: master; server4: backup master (high availability)

server2 and server3: chunkservers

physical host: client

See the previous article for the basic MFS setup.

server1

yum install moosefs-cgi-3.0.103-1.rhsystemd.x86_64.rpm moosefs-cgiserv-3.0.103-1.rhsystemd.x86_64.rpm moosefs-cli-3.0.103-1.rhsystemd.x86_64.rpm moosefs-master-3.0.103-1.rhsystemd.x86_64.rpm -y


server2 and server3

 yum install -y moosefs-chunkserver-3.0.103-1.rhsystemd.x86_64.rpm

server4

yum install  moosefs-master-3.0.103-1.rhsystemd.x86_64

 

 

[root@server1 mfs]# du -sh /var/lib/mfs
3.6M    /var/lib/mfs

 

server1 and server4

yum install -y  pacemaker corosync  pcs

Add these two sections to the yum repo configuration:

[HighAvailability]
name=HighAvailability
baseurl=http://172.25.11.250/rhel7.3/addons/HighAvailability
gpgcheck=0

[ResilientStorage]
name=ResilientStorage
baseurl=http://172.25.11.250/rhel7.3/addons/ResilientStorage
gpgcheck=0
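
A quick way to confirm that both repos are actually picked up (the exact output depends on the local mirror):

yum repolist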

 

Set up passwordless SSH from server1 (generate a key first if there is none):

ssh-keygen
ssh-copy-id server4
ssh-copy-id server1

 

systemctl isolate multi-user.target         # switch off the graphical target (like init 3)

systemctl isolate graphical.target          # switch the graphical target back on (like init 5)

 

Start and enable pcsd:

[root@server1 mfs]# systemctl start pcsd
[root@server1 mfs]# systemctl enable pcsd

passwd hacluster            # set the hacluster password (use the same one on both nodes)
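
If the hacluster password should be set non-interactively on both nodes, something like the following works (the password "redhat" here is only an example, not taken from the original notes):

echo redhat | passwd --stdin hacluster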

 

On server1:

pcs  cluster auth  server1  server4

[root@server1 mfs]# pcs  cluster auth  server1  server4
Username: hacluster
Password:
server4: Authorized
server1: Authorized

 

pcs cluster setup   --name mycluster server1  server4

pcs  cluster start   server1   server4

[root@server1 mfs]# pcs status  corosync

Membership information
----------------------
    Nodeid      Votes Name
         1          1 server1 (local)
         2          1 server4

 

journalctl | grep -i error                  # check the logs for errors

pcs property set stonith-enabled=false      # no fence device configured yet, so disable stonith for now

crm_verify -L -V                            # verify the cluster configuration

pcs cluster start server1 server4           # or: pcs cluster start --all

[root@server1 mfs]# pcs status
Cluster name: mycluster
Stack: corosync
Current DC: server1 (version 1.1.15-11.el7-e174ec8) - partition with quorum
Last updated: Fri Apr  5 14:16:28 2019        Last change: Fri Apr  5 14:15:31 2019 by root via cibadmin on server1

2 nodes and 0 resources configured

Online: [ server1 server4 ]

No resources
Daemon Status:
  corosync: active/disabled
  pacemaker: active/disabled
  pcsd: active/enabled

 

[root@server1 mfs]# corosync-cfgtool  -s
Printing ring status.
Local node ID 1
RING ID 0
    id    = 172.25.11.1
    status    = ring 0 active with no faults

[root@server4 3.0.103]# corosync-cfgtool  -s
Printing ring status.
Local node ID 2
RING ID 0
    id    = 172.25.11.4
    status    = ring 0 active with no faults

 

[root@server1 mfs]# cat /etc/corosync/corosync.conf
totem {
    version: 2
    secauth: off
    cluster_name: mycluster
    transport: udpu
}

nodelist {
    node {
        ring0_addr: server1
        nodeid: 1
    }

    node {
        ring0_addr: server4
        nodeid: 2
    }
}

 

 

client

 

[root@foundation11 mfs]# umount  /mnt/mfs/mfsmeta
[root@foundation11 mfs]# umount  /mnt/mfs

 

On all the servers (server1-server4), stop the MFS services:

systemctl stop moosefs-chunkserver          # on server2 and server3

systemctl stop moosefs-master               # on server1 (and server4 if it is running)

 

Edit /etc/hosts so that mfsmaster now resolves to the virtual IP:

172.25.11.100 mfsmaster
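
The same entry is needed on every machine that talks to mfsmaster (both masters, both chunkservers, and the client). A hedged one-liner to push it out from the client, assuming root SSH access to each node; any previous mfsmaster line is removed first:

for h in server1 server2 server3 server4; do
    ssh root@$h "sed -i '/mfsmaster/d' /etc/hosts; echo '172.25.11.100 mfsmaster' >> /etc/hosts"
done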


 

 

server3 (export the spare disk /dev/vdb as an iSCSI target):

yum install -y targetcli

systemctl start target

targetcli
/backstores/block create my_disk1 /dev/vdb
/iscsi create iqn.2019-04.com.example:server3
/iscsi/iqn.2019-04.com.example:server3/tpg1/luns create /backstores/block/my_disk1
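
The initiator logins later on will only succeed if the target grants them access, a step the notes above do not show. One way to do it inside the same targetcli session, assuming the example initiator IQN iqn.2019-04.com.example:client (it must match /etc/iscsi/initiatorname.iscsi on server1 and server4):

/iscsi/iqn.2019-04.com.example:server3/tpg1/acls create iqn.2019-04.com.example:client
saveconfig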

 

server1:

yum install iscsi-* -y

cat /etc/iscsi/initiatorname.iscsi          # check (and if needed change) the initiator name, then restart iscsid

systemctl restart iscsid

iscsiadm -m discovery -t st -p 172.25.11.3
iscsiadm -m node -l

(Stale records from earlier discoveries can be cleaned up with: iscsiadm -m node -o delete)
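
To match the example ACL sketched above, /etc/iscsi/initiatorname.iscsi on server1 (and later on server4) would contain the same assumed IQN; any value works as long as it matches the ACL on the target:

InitiatorName=iqn.2019-04.com.example:client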

 

server1

mkfs.xfs  /dev/sdb 

mount /dev/sdb  /mnt

cd /var/lib/mfs/

cp -p * /mnt/

chown mfs.mfs  /mnt

systemctl start moosefs-master

Confirm that the master starts cleanly, then stop it again and unmount the disk:

systemctl stop moosefs-master
umount /mnt

 

server4 

yum install iscsi-* -y

cat /etc/iscsi/initiatorname.iscsi

iscsiadm -m discovery -t st -p 172.25.11.3

iscsiadm -m node -l

mount  /dev/sdb  /var/lib/mfs

systemctl start moosefs-master              # confirm the master also runs from the shared disk here
systemctl stop moosefs-master
umount /var/lib/mfs

 

 

server1

 

Now let the cluster manage the iSCSI device and the master: create the virtual IP, the filesystem mount, and the moosefs-master service as resources.

pcs resource  create vip ocf:heartbeat:IPaddr2 ip=172.25.11.100 cidr_netmask=32 op monitor interval=30s

pcs resource create mfsdata  ocf:heartbeat:Filesystem device=/dev/sdb directory=/var/lib/mfs fstype=xfs op monitor interval=30s

 

pcs resource  create mfsd  systemd:moosefs-master op monitor interval=1min

pcs resource group  add mfsgroup  vip mfsdata  mfsd
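
A group implies both colocation and start order (vip, then mfsdata, then mfsd), so no extra constraints are needed; the result can be inspected with:

pcs resource show mfsgroup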

pcs cluster stop server1            # stop the cluster on server1; the resources migrate to server4
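
To confirm the failover, a few optional checks on server4 (using the resource names created above):

ip addr | grep 172.25.11.100            # the VIP has moved here
df -h /var/lib/mfs                      # the shared filesystem is mounted
systemctl status moosefs-master         # the master is running under pacemaker control

pcs cluster start server1               # afterwards, bring server1 back as the standby node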

 

server2 and server3

systemctl  start moosefs-chunkserver

 

client

mfsmount

cd /mnt/mfs 
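
A simple sanity check that the client still reaches the master through the VIP: write a file and look at where its chunks are stored (mfsfileinfo ships with the MooseFS client; the file name is arbitrary):

cp /etc/fstab /mnt/mfs/
mfsfileinfo /mnt/mfs/fstab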

crm_mon

Stack: corosync
Current DC: server1 (version 1.1.15-11.el7-e174ec8) - partition with quorum
Last updated: Fri Apr  5 16:55:45 2019          Last change: Fri Apr  5 16:55:42 2019 by root via cibadmin on server1

2 nodes and 3 resources configured

Online: [ server1 server4 ]

Active resources:

 Resource Group: mfsgroup
     vip        (ocf::heartbeat:IPaddr2):       Started server1
     mfsd       (systemd:moosefs-master):       Started server1
     mfsdata    (ocf::heartbeat:Filesystem):    Started server4

 

 

Use fencing so that a node hit by a kernel error (and therefore unable to shut itself down cleanly) is power-cycled automatically. Such a crash can be simulated on a node with:

echo c > /proc/sysrq-trigger

server1 and server4 (install the fence_xvm agent and create the key directory):

yum install -y fence-virt
mkdir /etc/cluster

Physical host (foundation11) - the fence_virtd daemon runs here:

yum list fence-virtd
yum install -y fence-virtd fence-virtd-libvirt fence-virtd-multicast
mkdir -p /etc/cluster

cd /etc/cluster/

dd  if=/dev/urandom  of=fence_xvm.key  bs=128 count=1

fence_virtd -c

In the interactive configuration, if a default answer is not what the environment uses, do not just keep pressing Enter: in particular the listener interface must be the bridge the virtual machines are attached to (br0 here).

systemctl start fence_virtd

 

Copy the key from the physical host to both cluster nodes:

scp /etc/cluster/fence_xvm.key server1:/etc/cluster/
scp /etc/cluster/fence_xvm.key server4:/etc/cluster/
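
Optionally verify that the key is identical everywhere; the checksums on foundation11, server1 and server4 must match:

md5sum /etc/cluster/fence_xvm.key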
 

 

pcs stonith create  vmfence  fence_xvm pcmk_host_map="server1:server1;server4:server4"  op monitor interval=1min

pcs property set stonith-enabled=true 

 

pcmk_host_map is a "cluster node name:virt domain name" mapping; here the node names and the libvirt domain names happen to be identical.

 

[root@foundation11 mfs]# virsh list
 Id    Name                           State
----------------------------------------------------
 17    server1                        running
 22    server3                        running
 27    server4                        running
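
Before relying on the fence device, it can be checked from the cluster side (vmfence is the resource name created above):

stonith_admin --list-installed | grep fence_xvm
pcs stonith show vmfence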

 

 

crm_verify -L -V            # check the configuration for errors

 

fence_xvm  -H server4   

This powers server4 off and it reboots automatically.
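
If a manual fence does nothing, a quick check that the nodes can actually reach fence_virtd on the host is to list the domains it exposes (run on server1 or server4):

fence_xvm -o list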

systemctl status pcsd

pcs cluster start server4 

crm_mon

Stack: corosync
Current DC: server1 (version 1.1.15-11.el7-e174ec8) - partition with quorum
Last updated: Fri Apr  5 17:19:15 2019          Last change: Fri Apr  5 17:18:58 2019 by root via cibadmin on server1

2 nodes and 4 resources configured

Online: [ server1 server4 ]

Active resources:

 Resource Group: mfsgroup
     vip        (ocf::heartbeat:IPaddr2):       Started server1
     mfsd       (systemd:moosefs-master):       Started server1
     mfsdata    (ocf::heartbeat:Filesystem):    Started server4
 vmfence        (stonith:fence_xvm):    Started server4

 

 

For the final test, trigger a kernel crash on server4:

echo c > /proc/sysrq-trigger

server4 is fenced and reboots automatically; once it is back up, pcs cluster start server4 rejoins it to the cluster.

 

For reference, the fence_virtd configuration generated on the physical host:

cat /etc/fence_virt.conf

fence_virtd {
    listener = "multicast";
    backend = "libvirt";
    module_path = "/usr/lib64/fence-virt";
}

listeners {
    multicast {
        key_file = "/etc/cluster/fence_xvm.key";
        address = "225.0.0.12";
        interface = "br0";
        family = "ipv4";
        port = "1229";
    }

}

backends {
    libvirt {
        uri = "qemu:///system";
    }

}
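
One last hedged check on the physical host: with the multicast listener shown above, fence_virtd should be listening on UDP port 1229 and that port must not be blocked by the firewall.

ss -unlp | grep 1229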
