-
一個封閉的程序組通信模式(A closed process group communication model),這個模式提供一種虛擬同步方式,用來保證複製服務器狀態的一致性。
-
一個簡單的可用性管理組件(A simple availability manager),當應用程序的進程失敗後,這個管理組件可以將其重新啓動。
-
一個配置和統計信息的內存數據庫(A configuration and statistics in-memory database),其中的數據能夠被設置、恢復,並可接收數據更改的通知信息。
-
一個法定人數系統(A quorum system),當法定人數達成或者丟失時通知應用程序。
-
corosync 1.x -> corosync 1.4.6(最新) 代號:flatiron
-
corosync 2.x -> corosync 2.3.1(最新) 代號:needle
-
CentOS 6.4 X86_64位系統
-
corosync-1.4.1-15.el6_4.1.x86_64
-
pacemaker-1.1.8-7.el6.x86_64
1
2
3
4
5
6
7
8
9
|
[root@node1
~] #
uname -n node1.test.com [root@node1
~] #
vim /etc/hosts 127.0.0.1
localhost localhost.localdomain localhost4 localhost4.localdomain4 ::1
localhost localhost.localdomain localhost6 localhost6.localdomain6 192.168.18.201
node1.test.com
node1 192.168.18.202
node2.test.com
node2 [root@node1
~] #
ping node1 [root@node1
~] #
ping node2 |
1
|
[root@node1
~] #
ntpdate 210.72.145.44 |
1
2
|
[root@node1
~] #
ssh-keygen -t rsa -f ~/.ssh/id_rsa -P '' [root@node1
~] #
ssh-copy-id -i .ssh/id_rsa.pub [email protected] |
1
2
3
4
5
6
7
8
9
|
[root@node2
~] #
uname -n node2.test.com [root@node2
~] #
vim /etc/hosts 127.0.0.1
localhost localhost.localdomain localhost4 localhost4.localdomain4 ::1
localhost localhost.localdomain localhost6 localhost6.localdomain6 192.168.18.201
node1.test.com
node1 192.168.18.202
node2.test.com
node2 [root@node2~] #
ping node1 [root@node2
~] #
ping node2 |
1
|
[root@node2
~] #
ntpdate 210.72.145.44 |
1
2
|
[root@node2
~] #
ssh-keygen -t rsa -f ~/.ssh/id_rsa -P '' [root@node2
~] #
ssh-copy-id -i .ssh/id_rsa.pub [email protected] |
1
2
3
4
5
6
7
|
[root@node1
src] #
wget http://download.fedoraproject.org/pub/epel/5/x86_64/epel-release-5-4.noarch.rpm [root@node1
src] #
rpm -ivh epel-release-5-4.noarch.rpm warning:
epel-release-5-4.noarch.rpm: Header V3 DSA signature: NOKEY, key ID 217521f6 Preparing... ###########################################
[100%] 1:epel-release ###########################################
[100%] [root@node1
src] #
rpm --import /etc/pki/rpm-gpg/RPM-GPG-KEY-CentOS-5 [root@node1
src] #
yum list |
1
2
3
4
5
6
7
|
[root@node2
src] #
wget http://download.fedoraproject.org/pub/epel/5/x86_64/epel-release-5-4.noarch.rpm [root@node2
src] #
rpm -ivh epel-release-5-4.noarch.rpm warning:
epel-release-5-4.noarch.rpm: Header V3 DSA signature: NOKEY, key ID 217521f6 Preparing... ###########################################
[100%] 1:epel-release ###########################################
[100%] [root@node2
src] #
rpm --import /etc/pki/rpm-gpg/RPM-GPG-KEY-CentOS-5 [root@node2
src] #
yum list |
1
2
3
4
5
6
7
8
9
10
11
12
|
[root@node1
~] #
service iptables stop [root@node1
~] #
vim /etc/selinux/config #
This file controls the state of SELinux on the system. #
SELINUX= can take one of these three values: #
enforcing - SELinux security policy is enforced. #
permissive - SELinux prints warnings instead of enforcing. #
disabled - SELinux is fully disabled. SELINUX=disabled #
SELINUXTYPE= type of policy in use. Possible values are: #
targeted - Only targeted network daemons are protected. #
strict - Full SELinux protection. SELINUXTYPE=targeted |
1
2
3
4
5
6
7
8
9
10
11
12
|
[root@node2
~] #
service iptables stop [root@node2
~] #
vim /etc/selinux/config #
This file controls the state of SELinux on the system. #
SELINUX= can take one of these three values: #
enforcing - SELinux security policy is enforced. #
permissive - SELinux prints warnings instead of enforcing. #
disabled - SELinux is fully disabled. SELINUX=disabled #
SELINUXTYPE= type of policy in use. Possible values are: #
targeted - Only targeted network daemons are protected. #
strict - Full SELinux protection. SELINUXTYPE=targeted |
1
2
|
[root@node1
~] #
yum install -y corosync* [root@node1
~] #
yum install -y pacemaker* |
1
2
|
[root@node2
~] #
yum install -y corosync* [root@node2
~] #
yum install -y pacemaker* |
1
2
3
4
5
6
7
|
[root@node1
~] #
cd /etc/corosync/ [root@node1
corosync] #
ll 總用量
24 -rw-r--r--
1 root root 445 5月 15 05:09 corosync.conf.example -rw-r--r--
1 root root 1084 5月 15 05:09 corosync.conf.example.udpu drwxr-xr-x
2 root root 4096 5月 15 05:09 service.d drwxr-xr-x
2 root root 4096 5月 15 05:09 uidgid.d |
1
|
[root@node1
corosync] #
cp corosync.conf.example corosync.conf |
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
|
[root@node1
corosync] #
cat corosync.conf #
Please read the corosync.conf.5 manual page compatibility:
whitetank totem
{ version:
2 secauth:
on #啓動認證 threads:
2 interface
{ ringnumber:
0 bindnetaddr:
192.168.18.0 #修改心跳線網段 mcastaddr:
226.99.10.1 #組播傳播心跳信息 mcastport:
5405 ttl:
1 } } logging
{ fileline:
off to_stderr:
no to_logfile: yes to_syslog:
no logfile: /var/log/cluster/corosync.log #日誌位置 debug:
off timestamp:
on logger_subsys
{ subsys:
AMF debug:
off } } amf
{ mode:
disabled } #啓用pacemaker service
{ ver:
0 name:
pacemaker } aisexec
{ user:
root group:
root } |
1
2
3
4
5
6
7
|
[root@node1
corosync] #
mv /dev/{random,random.bak} [root@node1
corosync] #
ln -s /dev/urandom /dev/random [root@node1
corosync] #
corosync-keygen Corosync
Cluster Engine Authentication key generator. Gathering
1024 bits for key
from /dev/random. Press
keys on your keyboard to generate entropy. Writing
corosync key to /etc/corosync/authkey. |
1
2
3
4
5
6
7
8
|
[root@node1
corosync] #
ll 總用量
24 -r--------
1 root root 128 8月 13 14:16 authkey -rw-r--r--
1 root root 521 8月 13 11:11 corosync.conf -rw-r--r--
1 root root 445 5月 15 05:09 corosync.conf.example -rw-r--r--
1 root root 1084 5月 15 05:09 corosync.conf.example.udpu drwxr-xr-x
2 root root 4096 5月 15 05:09 service.d drwxr-xr-x
2 root root 4096 5月 15 05:09 uidgid.d |
1
2
3
4
5
6
7
8
9
10
11
12
|
[root@node1
corosync] #
scp -p authkey corosync.conf node2:/etc/corosync/ authkey
100% 128 0.1KB/s 00:00 corosync.conf
100% 521 0.5KB/s 00:00 [root@node2
~] #
cd /etc/corosync/ [root@node2
corosync] #
ll 總用量
24 -r--------
1 root root 128 8月 13 14:16 authkey -rw-r--r--
1 root root 521 8月 13 11:11 corosync.conf -rw-r--r--
1 root root 445 5月 15 05:09 corosync.conf.example -rw-r--r--
1 root root 1084 5月 15 05:09 corosync.conf.example.udpu drwxr-xr-x
2 root root 4096 5月 15 05:09 service.d drwxr-xr-x
2 root root 4096 5月 15 05:09 uidgid.d |
1
2
3
4
|
[root@node1
~] #
ssh node2 "service corosync start" Starting
Corosync Cluster Engine (corosync): [確定] [root@node1
~] #
service corosync start Starting
Corosync Cluster Engine (corosync): [確定] |
1
2
3
4
5
6
|
[root@node1
~] #
grep -e "Corosync Cluster Engine" -e "configuration file" /var/log/cluster/corosync.log Aug
13 14:20:15 corosync [MAIN ] Corosync Cluster Engine ( '1.4.1' ):
started and ready to provide service. Aug
13 14:20:15 corosync [MAIN ] Successfully read main
configuration file '/etc/corosync/corosync.conf' . Aug
13 17:08:51 corosync [MAIN ] Corosync Cluster Engine ( '1.4.1' ):
started and ready to provide service. Aug
13 17:08:51 corosync [MAIN ] Successfully read main
configuration file '/etc/corosync/corosync.conf' . Aug
13 17:08:51 corosync [MAIN ] Corosync Cluster Engine exiting with status 18 at main.c:1794. |
1
2
3
4
5
6
|
[root@node1
~] #
grep TOTEM /var/log/cluster/corosync.log Aug
13 14:20:15 corosync [TOTEM ] Initializing transport (UDP/IP Multicast). Aug
13 14:20:15 corosync [TOTEM ] Initializing transmit/receive security:
libtomcrypt SOBER128/SHA1HMAC (mode
0). Aug
13 14:20:15 corosync [TOTEM ] The network interface [192.168.18.201] is now up. Aug
13 14:20:15 corosync [TOTEM ] A processor joined or left the membership and a new membership was formed. Aug
13 14:20:40 corosync [TOTEM ] A processor joined or left the membership and a new membership was formed. |
1
2
3
|
[root@node1
~] #
grep ERROR: /var/log/cluster/corosync.log Aug
13 14:20:15 corosync [pcmk ] ERROR: process_ais_conf: You have configured a cluster using the Pacemaker plugin for Corosync.
The plugin is not supported in this
environment and will be removed very soon. Aug
13 14:20:15 corosync [pcmk ] ERROR: process_ais_conf: Please see Chapter 8 of 'Clusters
from Scratch' (http://www.clusterlabs.org/doc) for details
on using Pacemaker with CMAN |
1
2
3
4
5
6
|
[root@node1
~] #
grep pcmk_startup /var/log/cluster/corosync.log Aug
13 14:20:15 corosync [pcmk ] info: pcmk_startup: CRM: Initialized Aug
13 14:20:15 corosync [pcmk ] Logging: Initialized pcmk_startup Aug
13 14:20:15 corosync [pcmk ] info: pcmk_startup: Maximum core file size
is: 18446744073709551615 Aug
13 14:20:15 corosync [pcmk ] info: pcmk_startup: Service: 9 Aug
13 14:20:15 corosync [pcmk ] info: pcmk_startup: Local hostname:
node1.test.com |
1
2
3
4
5
6
7
8
9
|
[root@node1
~] #
crm_mon Last
updated: Tue Aug 13 17:41:31 2013 Last
change: Tue Aug 13 14:20:40 2013 via crmd on node1.test.com Stack:
classic openais (with plugin) Current
DC: node2.test.com
- partition with quorum Version:
1.1.8-7.el6-394e906 2
Nodes configured, 2 expected votes 0
Resources configured. Online:
[ node1.test.com
node2.test.com
] |