實驗環境:vmware workstation
os:Centos5.8 x86_64
編輯兩臺虛擬機分別新增一塊網卡作爲心跳檢測,新增一塊4G的硬盤,大小保持一致
兩臺機器的基本情況如下所示:
centos1.mypharma.com 192.168.150.100,心跳線爲:10.10.10.2(VM2網段)
centos2.mypharma.com 192.168.150.101,心跳線爲:10.10.10.3(VM2網段)
heartbeat的vip爲 192.168.150.128
一、實驗前的準備工作
①drbd1的hosts文件內容如下所示:
[root@centos1 ~]# cat /etc/hosts
# Do not remove the following line, or various programs
# that require network functionality will fail.
127.0.0.1 localhost.localdomain localhost
::1 localhost6.localdomain6 localhost6
192.168.150.100 centos1.mypharma.com
192.168.150.101 centos2.mypharma.com
②drbd1的hostname:
[root@centos1 ~]# cat /etc/sysconfig/network
NETWORKING=yes
NETWORKING_IPV6=yes
HOSTNAME=centos1.mypharma.com
③關閉iptables、SElinux
[root@centos1 ~]# setenforce 0
setenforce: SELinux is disabled
[root@centos1 ~]# service iptables stop
④檢查磁盤
[root@centos1 ~]# fdisk -l
Disk /dev/sda: 85.8 GB, 85899345920 bytes
255 heads, 63 sectors/track, 10443 cylinders
Units = cylinders of 16065 * 512 = 8225280 bytes
Device Boot Start End Blocks Id System
/dev/sda1 * 1 25 200781 83 Linux
/dev/sda2 26 1057 8289540 82 Linux swap / Solaris
/dev/sda3 1058 10443 75393045 83 Linux
Disk /dev/sdb: 4294 MB, 4294967296 bytes
255 heads, 63 sectors/track, 522 cylinders
Units = cylinders of 16065 * 512 = 8225280 bytes
Disk /dev/sdb doesn't contain a valid partition table
drbd2做相同的操作。
二、DRBD的安裝
drbd1
yum -y install drbd83 kmod-drbd83
modprobe drbd
[root@centos1 ~]# lsmod | grep drbd
drbd 321608 0
drbd2
yum -y install drbd83 kmod-drbd83
modprobe drbd
[root@centos2 ~]# lsmod | grep drbd
drbd 321608 0
如果能正確顯示,表明DRBD已經安裝成功
兩臺機器的drbd.conf配置文件內容如下所示(兩臺機器的配置是一樣的):
[root@centos1 ~]# cat /etc/drbd.conf
global {
usage-count no;
}
common {
syncer { rate 30M; }
}
resource r0 {
protocol C;
handlers {
pri-on-incon-degr "/usr/lib/drbd/notify-pri-on-incon-degr.sh; /usr/lib/drbd/notify-emergency-reboot.sh; echo b > /proc/sysrq-trigger ; reboot -f";
pri-lost-after-sb "/usr/lib/drbd/notify-pri-lost-after-sb.sh; /usr/lib/drbd/notify-emergency-reboot.sh; echo b > /proc/sysrq-trigger ; reboot -f";
local-io-error "/usr/lib/drbd/notify-io-error.sh; /usr/lib/drbd/notify-emergency-shutdown.sh; echo o > /proc/sysrq-trigger ; halt -f";
# fence-peer "/usr/lib/drbd/crm-fence-peer.sh";
# split-brain "/usr/lib/drbd/notify-split-brain.sh root";
# out-of-sync "/usr/lib/drbd/notify-out-of-sync.sh root";
# before-resync-target "/usr/lib/drbd/snapshot-resync-target-lvm.sh -p 15 -- -c 16k";
# after-resync-target /usr/lib/drbd/unsnapshot-resync-target-lvm.sh;
}
startup {
# wfc-timeout degr-wfc-timeout outdated-wfc-timeout wait-after-sb
wfc-timeout 120;
degr-wfc-timeout 120;
}
disk {
# on-io-error fencing use-bmbv no-disk-barrier no-disk-flushes
# no-disk-drain no-md-flushes max-bio-bvecs
on-io-error detach;
}
net {
# sndbuf-size rcvbuf-size timeout connect-int ping-int ping-timeout max-buffers
# max-epoch-size ko-count allow-two-primaries cram-hmac-alg shared-secret
# after-sb-0pri after-sb-1pri after-sb-2pri data-integrity-alg no-tcp-cork
max-buffers 2048;
cram-hmac-alg "sha1";
shared-secret "123456";
#allow-two-primaries;
}
syncer {
rate 30M;
# rate after al-extents use-rle cpu-mask verify-alg csums-alg
}
on centos1.mypharma.com {
device /dev/drbd0;
disk /dev/sdb;
address 10.10.10.2:7788;
meta-disk internal;
}
on centos2.mypharma.com {
device /dev/drbd0;
disk /dev/sdb;
address 10.10.10.3:7788;
meta-disk internal;
}
}
創建DRBD元數據信息
[root@centos1 ~]# drbdadm create-md r0
Writing meta data...
initializing activity log
NOT initialized bitmap
New drbd meta data block successfully created.
將centos1的機器作爲DRBD的Primary機器,命令如下所示:
[root@centos1 ~]# drbdsetup /dev/drbd0 primary -o
[root@centos1 ~]# drbdadm primary r0
[root@centos1 ~]# mkfs.ext3 /dev/drbd0
[root@centos1 ~]# mkdir -p /drbd
[root@centos1 ~]# mount /dev/drbd0 /drbd
[root@centos1 ~]# chkconfig drbd on
centos2機器
[root@centos2 ~]# mkdir -p /drbd
[root@centos2 ~]# chkconfig drbd on
三、Heartbeat的安裝和部署。
兩臺機器上分別用yum來安裝heartbeat,即在每臺機器上各執行一次如下命令:
yum -y install heartbeat
①編輯/etc/ha.d/ha.cf
drbd1
[root@centos1 ~]# cat /etc/ha.d/ha.cf
logfile /var/log/ha-log
logfacility local0
keepalive 2
deadtime 15
ucast eth1 10.10.10.3
auto_failback off
node centos1.mypharma.com centos2.mypharma.com
drbd2
[root@centos2 ~]# cat /etc/ha.d/ha.cf
logfile /var/log/ha-log
logfacility local0
keepalive 2
deadtime 15
ucast eth1 10.10.10.2
auto_failback off
node centos1.mypharma.com centos2.mypharma.com
②編輯雙機互連驗證文件authkeys
drbd1
[root@centos1 ~]# cat /etc/ha.d/authkeys
auth 1
1 crc
[root@centos1 ~]# chmod 600 /etc/ha.d/authkeys
drbd2
[root@centos2 ~]# cat /etc/ha.d/authkeys
auth 1
1 crc
[root@centos2 ~]# chmod 600 /etc/ha.d/authkeys
③編輯集羣資源文件/etc/ha.d/haresources
drbd1
[root@centos1 ~]# cat /etc/ha.d/haresources
centos1.mypharma.com IPaddr::192.168.150.128/24/eth0 drbddisk::r0 Filesystem::/dev/drbd0::/drbd::ext3 killnfsd
drbd2
[root@centos2 ~]# cat /etc/ha.d/haresources
centos1.mypharma.com IPaddr::192.168.150.128/24/eth0 drbddisk::r0 Filesystem::/dev/drbd0::/drbd::ext3 killnfsd
④編輯/etc/ha.d/resource.d/killnfsd
drbd1
[root@centos1 ~]# cat /etc/ha.d/resource.d/killnfsd
killall -9 nfsd;/etc/init.d/nfs restart;exit 0
[root@centos1 ~]# chmod +x /etc/ha.d/resource.d/killnfsd
drbd2
[root@centos2 ~]# cat /etc/ha.d/resource.d/killnfsd
killall -9 nfsd;/etc/init.d/nfs restart;exit 0
[root@centos2 ~]# chmod +x /etc/ha.d/resource.d/killnfsd
⑤主從機器上面配置下nfs服務的/etc/exports,其文件內容如下:
/drbd 192.168.150.0/255.255.255.0(rw,sync,no_root_squash,no_all_squash)
service portmap start
chkconfig portmap on
在兩臺機器上將DRBD和Heartbeat都設成自啓動方式。
service drbd start
chkconfig drbd on
service heartbeat start
chkconfig heartbeat on