以下文章中涉及nginx 基於saltstack的搭建可參看的我的博客 saltstack 自動化部署 nginx(源碼編譯)
實驗拓撲以及涉及的文件目錄如下:
進行keepalived 安裝腳本的編寫
[root@server1 ~]# vim /srv/salt/keepalived/install.sls
include:
- pkgs.make # pulls in the common build dependencies (gcc, make, ...)
kp.install: # source build of keepalived
file.managed: # file management: push the source tarball to the minion
- name: /mnt/keepalived-2.0.6.tar.gz
- source: salt://keepalived/files/keepalived-2.0.6.tar.gz
cmd.run: # unpack, configure and compile
- name: cd /mnt && tar zxf keepalived-2.0.6.tar.gz && cd keepalived-2.0.6 && ./configure --prefix=/usr/local/keepalived --with-init=SYSV &>/dev/null && make &>/dev/null && make install &>/dev/null
- create: /usr/local/keepalived
# once this path exists on the minion, the build step is skipped on re-runs
/etc/keepalived: # create the keepalived configuration directory
file.directory:
- mode: 755
/etc/sysconfig/keepalived: # symlink the sysconfig file into the standard location
file.symlink:
- target: /usr/local/keepalived/etc/sysconfig/keepalived
/sbin/keepalived:
file.symlink:
- target: /usr/local/keepalived/sbin/keepalived
進行keepalived 服務腳本的編寫
[root@server1 ~]# vim /srv/salt/keepalived/service.sls
include:
- keepalived.install # pull in the install states defined above
/etc/keepalived/keepalived.conf: # file to be synced to the minion
file.managed: # file management module
- source: salt://keepalived/files/keepalived.conf # source file on the master
- template: jinja # render the file through the jinja engine
- context:
STATE: {{ pillar['state'] }}
VRID: {{ pillar['vrid'] }}
PRIORITY: {{ pillar['priority'] }}
kp-service:
file.managed: # deploy the keepalived SysV init script
- name: /etc/init.d/keepalived
- source: salt://keepalived/files/keepalived
- mode: 755
service.running:
- name: keepalived
- reload: True
- watch: # reload the service whenever the config file changes
- file: /etc/keepalived/keepalived.conf
keepalived 配置文件模版編寫
[root@server1 ~]# vim /srv/salt/keepalived/files/keepalived.conf
! Configuration File for keepalived
global_defs {
notification_email {
root@localhost
}
notification_email_from keepalived@localhost
smtp_server 127.0.0.1
smtp_connect_timeout 30
router_id LVS_DEVEL
vrrp_skip_check_adv_addr
# vrrp_strict
vrrp_garp_interval 0
vrrp_gna_interval 0
}
vrrp_instance VI_1 {
state {{ STATE }} # filled from the context in keepalived.service
interface eth0
virtual_router_id {{ VRID }} # filled from the context in keepalived.service
priority {{ PRIORITY }} # filled from the context in keepalived.service
advert_int 1
authentication {
auth_type PASS
auth_pass 1111
}
virtual_ipaddress {
172.25.21.100 # the virtual IP (VIP)
}
}
服務腳本中pillar的編寫
[root@server1 ~]# vim /srv/pillar/keepalived/install.sls
{% if grains['fqdn'] == 'server1' %}
webserver: keepalived
state: MASTER # VRRP state for server1
vrid: 21 # virtual router id
priority: 100 # VRRP priority (higher wins)
{% elif grains['fqdn'] == 'server4' %}
webserver: keepalived
state: BACKUP
vrid: 21
priority: 50
{% endif %}
進行pillar top文件的編寫
[root@server1 ~]# vim /srv/pillar/top.sls
base:
'*':
- web.install
- keepalived.install # declared globally for all minions
salt top 文件的編寫
[root@server1 ~]# vim /srv/salt/top.sls
base:
'server1': # states assigned per minion in the top file
- haproxy.install
- keepalived.service
'server4':
- haproxy.install
- keepalived.service
'server2':
- apache.service
'server3':
- nginx.service
編輯完成進行向各主機推送
[root@server1 ~]# salt '*' state.highstate
以上,簡單的http 服務器 基於 keepalived + haproxy 的高可用負載均衡集羣已自動搭建完成。若想擴展業務,只需要添加minion主機,並修改相應的文件腳本即可
進行haproxy的高可用
我們建立的上述集羣是有缺陷的,沒有對haproxy進行健康檢查的,若是haproxy宕機,keepalived服務正常啓動,還是無法做到對後端web服務器負載均衡
爲了解決這個缺陷,我們可以爲keepalived添加haproxy的監控腳本
腳本內容如下:(一個很粗糙的腳本)
[root@server1 files]# vim check_haproxy.sh
#!/bin/bash
# Health check for haproxy, run periodically by keepalived's vrrp_script.
# If haproxy is not running, try to restart it; if the restart fails,
# stop keepalived so the VIP fails over to the backup node.
/etc/init.d/haproxy status &> /dev/null || /etc/init.d/haproxy restart &> /dev/null
# $? is the status of the chain above: non-zero only when haproxy was
# down AND the restart attempt also failed.
if [ $? -ne 0 ]; then
    /etc/init.d/keepalived stop &> /dev/null
    exit 1 # report failure so keepalived's script checker sees it
fi
exit 0
如果重啓後返回值不爲0,則停止keepalived服務
編輯keepalived 模版文件,並將內容推給各個keepalived主機
在keepalived.conf 開頭加上監控腳本
vrrp_script check_haproxy {
script "/etc/keepalived/check_haproxy.sh"
# absolute path of the health-check script
interval 2
# run the script every 2 seconds
weight 2
}
......省略......
virtual_ipaddress {
172.25.21.100
}
track_script {
check_haproxy # reference the script here so the instance tracks it
}
將此腳本推送到各個keepalived主機中
[root@server1 keepalived]# salt '*' state.highstate
server2:
----------
......省略......
Summary for server2
------------
Succeeded: 2 # server2推送成功
Failed: 0
------------
Total states run: 2
Total run time: 495.607 ms
server3:
----------
......省略......
Summary for server3
------------
Succeeded: 9 # server3 推送成功
Failed: 0
------------
Total states run: 9
Total run time: 1.373 s
server4:
----------
......省略......
Summary for server4 # server4推送成功
-------------
Succeeded: 13 (changed=4)
Failed: 0
-------------
Total states run: 13
Total run time: 10.100 s
server1:
----------
......省略......
Summary for server1
-------------
Succeeded: 13 (changed=3) # server1推送成功
Failed: 0
-------------
Total states run: 13
Total run time: 10.529 s