HAProxy + Pacemaker High-Availability Load Balancing

I. HAProxy

1. server1

1) Install and configure haproxy
[root@server1 ~]# yum install haproxy -y
[root@server1 ~]# vim /etc/haproxy/haproxy.cfg 
#---------------------------------------------------------------------
# Example configuration for a possible web application.  See the
# full configuration options online.
#
#   http://haproxy.1wt.eu/download/1.4/doc/configuration.txt
#
#---------------------------------------------------------------------

#---------------------------------------------------------------------
# Global settings
#---------------------------------------------------------------------
global
    # to have these messages end up in /var/log/haproxy.log you will
    # need to:
    #
    # 1) configure syslog to accept network log events.  This is done
    #    by adding the '-r' option to the SYSLOGD_OPTIONS in
    #    /etc/sysconfig/syslog
    #
    # 2) configure local2 events to go to the /var/log/haproxy.log
    #   file. A line like the following can be added to
    #   /etc/sysconfig/syslog
    #
    #    local2.*                       /var/log/haproxy.log
    #
    log         127.0.0.1 local2

    chroot      /var/lib/haproxy
    pidfile     /var/run/haproxy.pid
    maxconn     65535
    user        haproxy
    group       haproxy
    daemon

    # turn on stats unix socket
    stats socket /var/lib/haproxy/stats

#---------------------------------------------------------------------
# common defaults that all the 'listen' and 'backend' sections will
# use if not designated in their block
#---------------------------------------------------------------------
defaults
    mode                    http
    log                     global
    option                  httplog
    option                  dontlognull
    option http-server-close
    option forwardfor       except 127.0.0.0/8
    option                  redispatch
    retries                 3
    timeout http-request    10s
    timeout queue           1m
    timeout connect         10s
    timeout client          1m
    timeout server          1m
    timeout http-keep-alive 10s
    timeout check           10s
    maxconn                 8000
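    # stats uri exposes haproxy's built-in statistics page;
    # monitor-uri defines a lightweight URI that simply answers
    # 200 OK, useful for external health probes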
    stats uri               /admin/stats
    monitor-uri             /monitoruri

#---------------------------------------------------------------------
# main frontend which proxys to the backends
#---------------------------------------------------------------------
frontend  main
#    acl url_static       path_beg       -i /static /images /javascript /stylesheets
#    acl url_static       path_end       -i .jpg .gif .png .css .js

#    use_backend static          if url_static
    bind                    172.25.29.100:80 
    default_backend         static

#---------------------------------------------------------------------
# static backend for serving up images, stylesheets and such
#---------------------------------------------------------------------
backend static
    balance     roundrobin
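    # 'check' enables periodic health checks on each server; a backend
    # that fails its check is removed from rotation until it recovers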
    server      static1 172.25.29.2:80 check
    server      static2 172.25.29.3:80 check

#---------------------------------------------------------------------
# round robin balancing between the various backends
#---------------------------------------------------------------------
#backend app
#    balance     roundrobin
#    server  app1 127.0.0.1:5001 check
#    server  app2 127.0.0.1:5002 check
#    server  app3 127.0.0.1:5003 check
#    server  app4 127.0.0.1:5004 check

[root@server1 ~]# ip addr add 172.25.29.100/24 dev eth0
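
Before starting the service it is worth validating the configuration syntax; haproxy's -c flag parses the file and exits without actually starting the proxy:

[root@server1 ~]# haproxy -c -f /etc/haproxy/haproxy.cfg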
2) Raise the file-descriptor limit

The global maxconn of 65535 requires a correspondingly large open-file limit for the haproxy user:
[root@server1 ~]# vim /etc/security/limits.conf
haproxy          -       nofile          65535
[root@server1 ~]# /etc/init.d/haproxy start
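
To confirm the running process actually received the higher limit, inspect its /proc entry (the pid file path comes from the global section above):

[root@server1 ~]# grep "open files" /proc/$(cat /var/run/haproxy.pid)/limits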
3) Test

Open the following URLs in a browser:

http://172.25.29.100               # refresh with F5: requests alternate between the backends (round-robin)
http://172.25.29.100/admin/stats   # built-in statistics page with backend health status
http://172.25.29.100/monitoruri    # lightweight health probe defined by monitor-uri
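
The same behaviour can be verified from the command line. A minimal sketch, assuming server2 and server3 each serve an index page that identifies the host, so alternating responses demonstrate the round-robin rotation:

[root@server1 ~]# for i in $(seq 4); do curl -s http://172.25.29.100; done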

2. server4

1) Install haproxy
[root@server4 ~]# yum install haproxy -y

Then copy haproxy.cfg from server1 to server4:

[root@server1 ~]# scp /etc/haproxy/haproxy.cfg 172.25.29.4:/etc/haproxy/haproxy.cfg

Add the VIP and start haproxy:

[root@server4 ~]# ip addr add 172.25.29.100/24 dev eth0
[root@server4 ~]# /etc/init.d/haproxy start
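
Note that the same VIP is now manually configured on both nodes at once, which is only acceptable for this standalone test. Before pacemaker takes over address management in section II, the address should be removed on both machines so the cluster resource can own it exclusively:

[root@server1 ~]# ip addr del 172.25.29.100/24 dev eth0
[root@server4 ~]# ip addr del 172.25.29.100/24 dev eth0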

II. Pacemaker

1. server1

Install pacemaker, corosync, and crmsh, then create the corosync configuration from the shipped example:

[root@server1 ~]# yum install -y pacemaker corosync
[root@server1 ~]# rpm -ivh crmsh-1.2.6-0.rc2.2.1.x86_64.rpm --nodeps --force
[root@server1 ~]# cp /etc/corosync/corosync.conf.example /etc/corosync/corosync.conf
[root@server1 ~]# vim /etc/corosync/corosync.conf
# Please read the corosync.conf.5 manual page
compatibility: whitetank

totem {
    version: 2
    secauth: off
    threads: 0
    interface {
        ringnumber: 0
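        # bindnetaddr is the network address of the cluster subnet,
        # not a host IP; mcastaddr/mcastport set the multicast address
        # and port the totem protocol uses for cluster traffic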
        bindnetaddr: 172.25.29.0
        mcastaddr: 226.94.1.1
        mcastport: 5405
        ttl: 1
    }
}

logging {
    fileline: off
    to_stderr: no
    to_logfile: yes
    to_syslog: yes
    logfile: /var/log/cluster/corosync.log
    debug: off
    timestamp: on
    logger_subsys {
        subsys: AMF
        debug: off
    }
}

amf {
    mode: disabled
}

service {
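        # ver: 0 tells corosync to start pacemaker itself (plugin mode);
        # with ver: 1 the pacemaker daemon must be started separately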
        ver: 0
        name: pacemaker
}

aisexec {
    user: root
    group: root
}

quorum {
    provider: corosync_votequorum
    expected_votes: 2
    two_node: 1
}

[root@server1 ~]# /etc/init.d/corosync start
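
Once corosync is running, corosync-cfgtool (shipped with corosync) confirms which address the ring bound to and whether it is healthy:

[root@server1 ~]# corosync-cfgtool -s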

2. server4

Install pacemaker, corosync, and crmsh, copy the corosync configuration over from server1, and start corosync; crm status on either node should then show both nodes online:

[root@server4 ~]# yum install -y pacemaker corosync
[root@server4 ~]# rpm -ivh crmsh-1.2.6-0.rc2.2.1.x86_64.rpm --nodeps --force
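
The authkey copied below must already exist on server1; it is only required when secauth is on, and can be generated there first if needed (corosync-keygen reads from /dev/random, so it may take a moment):

[root@server1 ~]# corosync-keygen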
[root@server1 ~]# scp /etc/corosync/authkey /etc/corosync/corosync.conf 172.25.29.4:/etc/corosync/
[root@server4 ~]# /etc/init.d/corosync start
[root@server1 ~]# crm status
Last updated: Tue Apr 16 14:01:12 2018
Last change: Tue Apr 16 14:00:51 2018 via crmd on server1
Stack: classic openais (with plugin)
Current DC: server1 - partition with quorum
Version: 1.1.10-14.el6-368c726
2 Nodes configured, 2 expected votes
0 Resources configured

Online: [ server1 server4 ]
[root@server4 ~]# crm status
Last updated: Tue Apr 16 14:01:03 2018
Last change: Tue Apr 16 14:00:51 2018 via crmd on server1
Stack: classic openais (with plugin)
Current DC: server1 - partition with quorum
Version: 1.1.10-14.el6-368c726
2 Nodes configured, 2 expected votes
0 Resources configured

Online: [ server1 server4 ]

3. server1

[root@server1 ~]# crm configure property no-quorum-policy="ignore" # only two nodes, so keep resources running without quorum
[root@server1 ~]# crm configure property stonith-enabled=false     # disable STONITH: no fence devices in this setup
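
With STONITH disabled the configuration should pass validation; crm_verify checks the live CIB and reports any remaining errors:

[root@server1 ~]# crm_verify -LV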
[root@server1 ~]# crm configure # add the VIP and haproxy resources
crm(live)configure# primitive haproxy lsb:haproxy op monitor interval=30s
crm(live)configure# primitive vip ocf:heartbeat:IPaddr params ip=172.25.29.100 nic='eth0' cidr_netmask='24'
crm(live)configure# group web vip haproxy
crm(live)configure# commit
crm(live)configure# Ctrl-C, leaving
[root@server1 ~]# crm configure show
node server1
node server4
primitive haproxy lsb:haproxy \
    op monitor interval="30s"
primitive vip ocf:heartbeat:IPaddr \
    params ip="172.25.29.100" nic="eth0" cidr_netmask="24"
group web vip haproxy
property $id="cib-bootstrap-options" \
    dc-version="1.1.10-14.el6-368c726" \
    cluster-infrastructure="classic openais (with plugin)" \
    expected-quorum-votes="2" \
    no-quorum-policy="ignore" \
    stonith-enabled="false"
[root@server1 ~]# crm status
...
Online: [ server1 server4 ]

 Resource Group: web
     vip    (ocf::heartbeat:IPaddr):    Started server1 
     haproxy    (lsb:haproxy):  Started server1
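
To confirm the group really started on server1, check that the VIP is present on its interface:

[root@server1 ~]# ip addr show eth0 | grep 172.25.29.100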

4. Failover test

[root@server1 ~]# crm node standby   # take server1 down
[root@server1 ~]# crm status
...
Node server1: standby
Online: [ server4 ]

 Resource Group: web
     vip    (ocf::heartbeat:IPaddr):    Started server4 
     haproxy    (lsb:haproxy):  Started server4
[root@server1 ~]# crm node online  # bring server1 back
[root@server4 ~]# crm node standby # take server4 down
[root@server4 ~]# crm status
...
Node server4: standby
Online: [ server1 ]

 Resource Group: web
     vip    (ocf::heartbeat:IPaddr):    Started server1 
     haproxy    (lsb:haproxy):  Started server1
[root@server1 ~]# crm node standby  # server4 is still in standby, so both nodes are now down
[root@server1 ~]# crm status
...
Node server1: standby
Node server4: standby
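
With both nodes in standby nothing is eligible to run the web group, so the VIP and haproxy stop and the service is unreachable until at least one node is brought back:

[root@server1 ~]# crm node online
[root@server4 ~]# crm node online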