1.1 環境準備
1.1.1 添加新磁盤,分區並格式化
fdisk -uc /dev/sdb
dd if=/dev/zero of=/dev/sdb1 bs=1M count=1
mkfs.ext4 /dev/sdb1
1.1.2 在hosts文件中添加以下記錄
cat /etc/hosts
172.16.1.111 test01
172.16.1.112 test02
172.16.1.113 web
1.1.3 關閉selinux和iptables
setenforce 0
/etc/init.d/iptables stop
1.1.4 測試約定
主機名,系統版本及內核版本:
[root@test01 ~]# uname -nr test01 2.6.32-573.el6.x86_64 [root@test01 ~]# cat /etc/redhat-release CentOS release 6.7 (Final)
[root@test02 ~]# uname -nr test02 2.6.32-573.el6.x86_64 [root@test02 ~]# cat /etc/redhat-release CentOS release 6.7 (Final)
[root@web ~]# uname -nr web 2.6.32-573.el6.x86_64 [root@web ~]# cat /etc/redhat-release CentOS release 6.7 (Final)測試IP:
test01 172.16.1.111 test02 172.16.1.112 web 172.16.1.113 虛擬IP 172.16.1.100
1.2 DRBD的安裝
1.2.1 配置DRBD
默認官方源中沒有drbd軟件,所以使用elrepo源
[root@test01 ~]# rpm --import https://www.elrepo.org/RPM-GPG-KEY-elrepo.org [root@test01 ~]# rpm -Uvh http://www.elrepo.org/elrepo-release-6-6.el6.elrepo.noarch.rpm Retrieving http://www.elrepo.org/elrepo-release-6-6.el6.elrepo.noarch.rpm Preparing... ########################################### [100%] 1:elrepo-release ########################################### [100%] [root@test01 ~]# yum install drbd84-utils kmod-drbd84 -y
[root@test01 ~]# modprobe drbd [root@test01 ~]# lsmod | grep drbd drbd 365931 0 libcrc32c 1246 1 drbd
編輯配置文件
[root@test01 ~]# vim /etc/drbd.conf # You can find an example in /usr/share/doc/drbd.../drbd.conf.example # include "drbd.d/*.res"; global { usage-count no; } common { syncer { rate 200M; } } resource r1 { protocol C; startup { wfc-timeout 120; degr-wfc-timeout 120; } disk { on-io-error detach; } net { timeout 60; connect-int 10; ping-int 10; max-buffers 2048; max-epoch-size 2048; cram-hmac-alg "sha1"; shared-secret "Mysql-abcD"; } on test01 { device /dev/drbd0; disk /dev/sdb1; address 172.16.1.111:6666; meta-disk internal; } on test02 { device /dev/drbd0; disk /dev/sdb1; address 172.16.1.112:6666; meta-disk internal; } }
1.2.3 初始化設備
[root@test01 ~]# drbdadm create-md r1 initializing activity log NOT initializing bitmap Writing meta data... New drbd meta data block successfully created. [root@test01 ~]# drbdadm up r1 [root@test01 ~]# cat /proc/drbd version: 8.4.6 (api:1/proto:86-101) GIT-hash: 833d830e0152d1e457fa7856e71e11248ccf3f70 build by phil@Build64R6, 2015-04-09 14:35:00 0: cs:Connected ro:Secondary/Secondary ds:Inconsistent/Inconsistent C r----- ns:0 nr:0 dw:0 dr:0 al:0 bm:0 lo:0 pe:0 ua:0 ap:0 ep:1 wo:f oos:5241660
現在兩個節點都處於Secondary狀態
將test01手動提升至primary,並將/dev/drbd0進行格式化以供掛載使用
[root@test01 ~]# drbdadmprimary --force r1 [root@test01 ~]# cat/proc/drbd version: 8.4.6(api:1/proto:86-101) GIT-hash: 833d830e0152d1e457fa7856e71e11248ccf3f70build by root@test01, 2015-12-07 10:40:31 0: cs:Connected ro:Primary/Secondaryds:UpToDate/UpToDate C r----- ns:1047484 nr:0 dw:0 dr:1048148 al:0 bm:0lo:0 pe:0 ua:0 ap:0 ep:1 wo:f oos:0
1.2.4 格式化虛擬分區
[root@test01 ~]# mkfs.ext4 /dev/drbd0 mke2fs 1.41.12 (17-May-2010) 文件系統標籤= 操作系統:Linux .. ... Writing superblocks and filesystem accounting information: 完成 This filesystem will be automatically checked every 33 mounts or 180 days, whichever comes first. Use tune2fs -c or -i to override. [root@test01 ~]# tune2fs -c -1 /dev/drbd0 tune2fs 1.41.12 (17-May-2010) Setting maximal mount count to -1
1.2.5 掛載測試
[root@test01 ~]# mkdir /data [root@test01 ~]# mount -t ext4/dev/drbd0 /data [root@test01 ~]# df -h Filesystem Size Used Avail Use% Mounted on /dev/sda2 9.4G 1.5G 7.4G 17% / tmpfs 495M 0 495M 0% /dev/shm /dev/sda1 190M 36M 145M 20% /boot /dev/drbd0 991M 1.3M 939M 1% /data [root@test01 ~]# touch/data/drbd.test [root@test01 ~]# ls /data/ drbd.test lost+found
手動切換連個節點狀態,並進行掛載測試
[root@test01 ~]# umount /data [root@test01 ~]# drbdadmsecondary r1 [root@test01 ~]# cat/proc/drbd version: 8.4.6(api:1/proto:86-101) GIT-hash:833d830e0152d1e457fa7856e71e11248ccf3f70 build by root@test01, 2015-12-0710:40:31 0: cs:Connected ro:Secondary/Secondaryds:UpToDate/UpToDate C r----- ns:1080828 nr:0 dw:33344 dr:1048921 al:10bm:0 lo:0 pe:0 ua:0 ap:0 ep:1 wo:f oos:0
將test02手動提升至primary,並掛載/dev/drbd0進行讀寫測試
[root@test02 ~]# drbdadm primary r1 [root@test02 ~]# cat /proc/drbd version: 8.4.6 (api:1/proto:86-101) GIT-hash: 833d830e0152d1e457fa7856e71e11248ccf3f70 build by root@test02, 2015-12-07 10:40:31 0: cs:Connected ro:Primary/Secondary ds:UpToDate/UpToDate C r----- ns:0 nr:1080828 dw:1080828 dr:664 al:0 bm:0 lo:0 pe:0 ua:0 ap:0 ep:1 wo:f oos:0 [root@test02 ~]# mkdir /data [root@test02 ~]# mount -t ext4 /dev/drbd0 /data [root@test02 ~]# ls /data/ drbd.test lost+found [root@test02 ~]# umount /data [root@test02 ~]# drbdadm secondary r1
現在drbd兩個節點已經配置成功,並手動測試成功。drbd的兩個節點,同時時間只有primary狀態的節點提供服務。
1.3 配置nfs服務
test01 test02 web配置nfs的共享目錄爲/data ,test01 test02同時/dev/drbd0的掛載目錄也是/data
1.3.1 將test01 drbd狀態提升爲primary並將/dev/drbd0掛載至/data,編輯/etc/exports文件,並本地測試。
[root@test01 ~]# cat/proc/drbd version: 8.4.6(api:1/proto:86-101) GIT-hash: 833d830e0152d1e457fa7856e71e11248ccf3f70build by root@test01, 2015-12-07 10:40:31 0: cs:Connected ro:Secondary/Secondaryds:UpToDate/UpToDate C r----- ns:1080828 nr:8 dw:33352 dr:1048921 al:10bm:0 lo:0 pe:0 ua:0 ap:0 ep:1 wo:f oos:0
[root@test01 ~]# drbdadmprimary r1 [root@test01 ~]# mount -t ext4/dev/drbd0 /data [root@test01 ~]# df -h Filesystem Size Used Avail Use% Mounted on /dev/sda2 9.4G 1.5G 7.4G 17% / tmpfs 495M 0 495M 0% /dev/shm /dev/sda1 190M 36M 145M 20% /boot /dev/drbd0 991M 1.3M 939M 1% /data
[root@test01 ~]# cat /etc/exports /data 172.16.1.0/24(rw,sync,all_squash)
[root@test01 ~]#/etc/init.d/rpcbind start [root@test01 ~]#/etc/init.d/nfs start [root@test01 ~]# chown -R nfsnobody.nfsnobody/data [root@test01 ~]# showmount -e172.16.1.111 Export list for 172.16.1.111: /data 172.16.1.0/24
[root@test01 ~]# mount -t nfs172.16.1.111:/data /mnt [root@test01 ~]# ls /mnt drbd.test lost+found [root@test01 ~]# touch/mnt/test01.test [root@test01 ~]# ls /mnt drbd.test lost+found test01.test
1.3.2 將test01狀態降爲secondary,將test02狀態提升爲primary,並將/dev/drbd0掛載至/data,編輯/etc/exports文件,並本地測試
[root@test01 ~]# umount /mnt [root@test01 ~]#/etc/init.d/nfs stop [root@test01 ~]# umount /data [root@test01 ~]# drbdadmsecondary r1 [root@test01 ~]# cat/proc/drbd version: 8.4.6(api:1/proto:86-101) GIT-hash:833d830e0152d1e457fa7856e71e11248ccf3f70 build by root@test01, 2015-12-0710:40:31 0: cs:Connected ro:Secondary/Secondaryds:UpToDate/UpToDate C r----- ns:1080952 nr:8 dw:33476 dr:1049979 al:10bm:0 lo:0 pe:0 ua:0 ap:0 ep:1 wo:f oos:0
[root@test02 ~]# drbdadm primary r1 [root@test02 ~]# mount -t ext4/dev/drbd0 /data [root@test02 ~]# df -h Filesystem Size Used Avail Use% Mounted on /dev/sda2 9.4G 1.5G 7.4G 17% / tmpfs 495M 0 495M 0% /dev/shm /dev/sda1 190M 36M 145M 20% /boot /dev/drbd0 991M 1.3M 939M 1% /data
[root@test02 ~]# cat /etc/exports /data 172.16.1.0/24(rw,sync,all_squash)
[root@test02 ~]#/etc/init.d/rpcbind start [root@test02 ~]#/etc/init.d/nfs start [root@test02 ~]# chown -Rnfsnobody.nfsnobody /data [root@test02 ~]# showmount -e172.16.1.112 Export list for 172.16.1.112: /data 172.16.1.0/24
[root@test02 ~]# mount -t nfs172.16.1.112:/data /mnt [root@test02 ~]# ls /mnt drbd.test lost+found test01.test [root@test02 ~]# touch/mnt/test02.test [root@test02 ~]# ls /mnt drbd.test lost+found test01.test test02.test
[root@test02 ~]# umount /mnt [root@test02 ~]#/etc/init.d/nfs stop [root@test02 ~]# umount /data [root@test02 ~]# drbdadmsecondary r1 version: 8.4.6(api:1/proto:86-101) GIT-hash:833d830e0152d1e457fa7856e71e11248ccf3f70 build by root@test02, 2015-12-0710:40:31 0: cs:Connected ro:Secondary/Secondaryds:UpToDate/UpToDate C r----- ns:124 nr:1080952 dw:1081076 dr:1726 al:2bm:0 lo:0 pe:0 ua:0 ap:0 ep:1 wo:f oos:0
至此:drbd nfs手動掛載測試成功
1.4 配置heartbeat
配置heartbeat以達到nfs主節點失效自動將所有資源切換到備用節點
1.4.1 兩臺nfs服務器上安裝heartbeat
添加epel擴展源。官方yum源沒有heartbeat
yum -y install epel-release
yum install heartbeat -y
1.4.2 將兩臺服務器默認配置文件拷貝到/etc/ha.d/並做更改
[root@test01 ~]# rpm -qdheartbeat|grep doc /usr/share/doc/heartbeat-3.0.4/AUTHORS /usr/share/doc/heartbeat-3.0.4/COPYING /usr/share/doc/heartbeat-3.0.4/COPYING.LGPL /usr/share/doc/heartbeat-3.0.4/ChangeLog /usr/share/doc/heartbeat-3.0.4/README /usr/share/doc/heartbeat-3.0.4/apphbd.cf /usr/share/doc/heartbeat-3.0.4/authkeys /usr/share/doc/heartbeat-3.0.4/ha.cf /usr/share/doc/heartbeat-3.0.4/haresources
[root@test01 ~]# cp /usr/share/doc/heartbeat-3.0.4/authkeys /usr/share/doc/heartbeat-3.0.4/ha.cf /usr/share/doc/heartbeat-3.0.4/haresources /etc/ha.d/
[root@test01 ~]# egrep -v"#|^$" /etc/ha.d/ha.cf logfile /var/log/ha-log logfacility local0 keepalive 2 deadtime 30 warntime 10 initdead 60 mcast eth0 225.0.0.1 694 1 0 auto_failback on node test01 node test02
[root@test01 ~]# egrep -v "#|^$" /etc/ha.d/authkeys auth 1 1 sha1 liyanan [root@test01 ~]# chmod 600 /etc/ha.d/authkeys [root@test01 ~]# ll /etc/ha.d/authkeys -rw------- 1 root root 647 12月 7 15:13 /etc/ha.d/authkeys [root@test01 ~]# echo "test01 drbddisk::r1 Filesystem::/dev/drbd0::/data::ext4 nfs IPaddr::172.16.1.100/24/eth1" >> /etc/ha.d/haresources
將附件的腳本加入到/etc/ha.d/resource.d/ 下並賦予可執行權限。
1.4.3 啓動兩臺服務器heartbeat
[root@test01 ~]#/etc/init.d/heartbeat start [root@test01 ~]#/etc/init.d/heartbeat status heartbeat OK [pid 5362 et al]is running on test01 [test01]...
1.4.4 測試heartbeat
全部啓動heartbeat服務
[root@test01 ha.d]#/etc/init.d/heartbeat status heartbeat OK [pid 22646 et al]is running on test01 [test01]... [root@test02 ~]#/etc/init.d/heartbeat status heartbeat OK [pid 13217 et al]is running on test02 [test02]...
檢查test01掛載
[root@test01 ha.d]# df -h Filesystem Size Used Avail Use% Mounted on /dev/sda2 9.4G 1.6G 7.3G 18% / tmpfs 495M 0 495M 0% /dev/shm /dev/sda1 190M 36M 145M 20% /boot /dev/drbd0 991M 1.3M 939M 1% /data
檢查test02掛載
[root@test02 ~]# df -h Filesystem Size Used Avail Use% Mounted on /dev/sda2 9.4G 1.6G 7.3G 18% / tmpfs 495M 0 495M 0% /dev/shm /dev/sda1 190M 36M 145M 20% /boot
關閉test01的heartbeat服務後再檢查掛載
[root@test01 ha.d]#/etc/init.d/heartbeat stop Stopping High-Availabilityservices: Done.
[root@test01 ha.d]# df -h Filesystem Size Used Avail Use% Mounted on /dev/sda2 9.4G 1.6G 7.3G 18% / tmpfs 495M 0 495M 0% /dev/shm /dev/sda1 190M 36M 145M 20% /boot
再次檢查test02掛載
[root@test02 ~]# df -h Filesystem Size Used Avail Use% Mounted on /dev/sda2 9.4G 1.6G 7.3G 18% / tmpfs 495M 0 495M 0% /dev/shm /dev/sda1 190M 36M 145M 20% /boot /dev/drbd0 991M 1.3M 939M 1% /data
再次啓動test01的heartbeat服務,並檢查掛載
[root@test01 ha.d]#/etc/init.d/heartbeat start Starting High-Availabilityservices: Done.
[root@test01 ha.d]# df -h Filesystem Size Used Avail Use% Mounted on /dev/sda2 9.4G 1.6G 7.3G 18% / tmpfs 495M 0 495M 0% /dev/shm /dev/sda1 190M 36M 145M 20% /boot /dev/drbd0 991M 1.3M 939M 1% /data
再次檢查test02掛載
[root@test02 ~]# df -h Filesystem Size Used Avail Use% Mounted on /dev/sda2 9.4G 1.6G 7.3G 18% / tmpfs 495M 0 495M 0% /dev/shm /dev/sda1 190M 36M 145M 20% /boot
經過循環測試,證明heartbeat服務已經成功接管drbd nfs VIP服務
可通過VIP向外部提供nfs掛載解決nfs單點故障。
終極測試,循環寫入數據測試丟包率
測試方式:由test01爲主,提供nfs服務、test02爲備,接管nfs服務。web掛載test01提供的nfs共享目錄。通過間隔 1S 連續在web的掛載點上寫入數據,期間手動關閉test01的heartbeat服務,造成意外宕機的實驗效果,等腳本執行完畢後,在web端查看寫入的數據,以便檢測丟包率。
檢查掛載情況及虛擬IP
[root@test01 ~]# df -h Filesystem Size Used Avail Use% Mounted on /dev/sda2 9.4G 1.6G 7.4G 18% / tmpfs 242M 0 242M 0% /dev/shm /dev/sda1 190M 36M 145M 20% /boot /dev/drbd0 4.8G 11M 4.6G 1% /data [root@test01 ~]# ip add |grep 172.16 inet 172.16.1.111/24 brd 172.16.1.255 scope global eth1 inet 172.16.1.100/24 brd 172.16.1.255 scope global secondary eth1[root@test02 ~]# df -h Filesystem Size Used Avail Use% Mounted on /dev/sda2 9.4G 1.6G 7.4G 18% / tmpfs 242M 0 242M 0% /dev/shm /dev/sda1 190M 36M 145M 20% /boot [root@test02 ~]# ip add |grep 172.16 inet 172.16.1.112/24 brd 172.16.1.255 scope global eth1[root@web ~]# df -h Filesystem Size Used Avail Use% Mounted on /dev/sda2 9.4G 2.1G 6.9G 24% / tmpfs 242M 0 242M 0% /dev/shm /dev/sda1 190M 36M 145M 20% /boot 172.16.1.100:/data 4.8G 11M 4.6G 1% /data/www/
測試腳本:
# Failover data-loss probe: create one numbered marker file per second in
# the NFS-mounted directory while test01's heartbeat is stopped mid-run.
# Gaps in the resulting file sequence reveal writes lost during failover.
# NOTE: the original one-line form was missing the ';' separators before
# 'do' and between statements, so it would not parse as pasted.
for ((n = 1; n < 30; n++)); do
  touch "/data/www/$n"
  sleep 1
done
期間關閉test01的heartbeat服務,並檢查最終測試結果
[root@web ~]# ls /data/www/ 1 2 3 4 5 6 7 8 9 10 11 12 13 14 15 16 18 19 20 21 22 23 24 25 26 27 28 29
測試結果顯示序號 17 的文件缺失,即在 heartbeat 切換的瞬間丟失了 1 次寫入(29 次寫入丟 1 次);除切換窗口外數據全部寫入成功,故障自動接管功能驗證成功。