Building a Highly Available MongoDB Cluster with keepalived

Environment used in this article: mongo=3.4, keepalived=2.0.5, docker=1.8
This setup uses three nodes to build a MongoDB cluster with three shards; each shard is a replica set with one primary, one secondary, and one arbiter. Three router (mongos) nodes are deployed as well. To prevent a single point of failure between the application and any one router node, a keepalived cluster is added: a virtual IP fronts the three router nodes, and the application connects only to that virtual IP.

Detailed setup steps

Building the MongoDB cluster

https://docs.mongodb.com/manual/tutorial/deploy-shard-cluster/#deploy-a-sharded-cluster
It is recommended to read the official documentation once before starting; it is very detailed.

1. Configuration files

node1

version: '2.2'
services:
  configsrv:
    image: mongo:3.4
    command: mongod --keyFile /opt/keyfile --oplogSize 1024 --replSet configrs --port 27017 --configsvr --wiredTigerCacheSizeGB 5
    # command: mongod  --oplogSize 1024 --replSet configrs --port 27017 --configsvr --wiredTigerCacheSizeGB 5
    volumes:
      - /mnt/dataset/databases/mongo_meinian/configsrv208:/data/db
      - ./keyfile:/opt/keyfile
    healthcheck:
      test: "exit 0"
    ports:
      - "27020:27017"
    restart:
      always
    container_name:
      meinian_configsrv


  rs1_node:
    image: mongo:3.4
    command: mongod --keyFile /opt/keyfile --oplogSize 10240 --replSet rs1 --directoryperdb --port 27017 --shardsvr
    # command: mongod  --oplogSize 10240 --replSet rs1 --directoryperdb --port 27017 --shardsvr
    volumes:
      - /mnt/dataset/databases/mongo_meinian/meinian_rs1_208:/data/db
      - ./keyfile:/opt/keyfile
    healthcheck:
      test: "exit 0"
    ports:
      - "27021:27017"
    restart:
      always
    container_name:
      meinian_rs1_node

  rs2_node:
    image: mongo:3.4
    command: mongod --keyFile /opt/keyfile --oplogSize 10240 --replSet rs2 --directoryperdb --port 27017 --shardsvr
    # command: mongod --oplogSize 10240 --replSet rs2 --directoryperdb --port 27017 --shardsvr
    volumes:
      - /mnt/dataset/databases/mongo_meinian/meinian_rs2_208:/data/db
      - ./keyfile:/opt/keyfile
    healthcheck:
      test: "exit 0"
    ports:
      - "27022:27017"
    restart:
      always
    container_name:
      meinian_rs2_node
  rs3_arbiter:
    image: mongo:3.4
    command: mongod --keyFile /opt/keyfile --oplogSize 10240 --replSet rs3 --directoryperdb --port 27017 --shardsvr
    # command: mongod  --oplogSize 10240 --replSet rs3 --directoryperdb --port 27017 --shardsvr
    volumes:
      - /mnt/dataset/databases/mongo_meinian/meinian_rs3_arb:/data/db
      - ./keyfile:/opt/keyfile
    healthcheck:
      test: "exit 0"
    ports:
      - "27023:27017"
    restart:
      always
    container_name:
      meinian_rs3_arbiter

  router:
    image: mongo:3.4
    command: mongos --keyFile /opt/keyfile --configdb configrs/192.168.1.208:27020,192.168.1.209:27020,192.168.1.210:27020
    # command: mongos --configdb configrs/192.168.1.208:27018,192.168.1.209:27018,192.168.1.210:27018
    ports:
      - "27024:27017"
    volumes:
      - /mnt/dataset/databases/mongo_meinian/meinian_router208:/data/db
      - ./keyfile:/opt/keyfile
    healthcheck:
      test: "exit 0"
    restart:
      always
    container_name:
      meinian_router

node2

version: '2.2'
services:
  configsrv:
    image: mongo:3.4
    command: mongod --keyFile /opt/keyfile --oplogSize 1024 --replSet configrs --port 27017 --configsvr --wiredTigerCacheSizeGB 5
    # command: mongod --oplogSize 1024 --replSet configrs --port 27017 --configsvr --wiredTigerCacheSizeGB 5
    volumes:
      - /mnt/dataset/databases/mongo_meinian/configsrv209:/data/db
      - ./keyfile:/opt/keyfile
    healthcheck:
      test: "exit 0"
    ports:
      - "27020:27017"
    restart:
      always
    container_name:
      meinian_configsrv


  rs1_node:
    image: mongo:3.4
    command: mongod --keyFile /opt/keyfile --oplogSize 10240 --replSet rs1 --directoryperdb --port 27017 --shardsvr
    # command: mongod --oplogSize 10240 --replSet rs1 --directoryperdb --port 27017 --shardsvr
    volumes:
      - /mnt/dataset/databases/mongo_meinian/meinian_rs1_209:/data/db
      - ./keyfile:/opt/keyfile
    healthcheck:
      test: "exit 0"
    ports:
      - "27021:27017"
    restart:
      always
    container_name:
      meinian_rs1_node


  rs2_arbiter:
    image: mongo:3.4
    command: mongod --keyFile /opt/keyfile --oplogSize 10240 --replSet rs2 --directoryperdb --port 27017 --shardsvr
    # command: mongod  --oplogSize 10240 --replSet rs2 --directoryperdb --port 27017 --shardsvr
    volumes:
      - /mnt/dataset/databases/mongo_meinian/meinian_rs2_arb:/data/db
      - ./keyfile:/opt/keyfile
    healthcheck:
      test: "exit 0"
    ports:
      - "27022:27017"
    restart:
      always
    container_name:
      meinian_rs2_arbiter


  rs3_node:
    image: mongo:3.4
    command: mongod --keyFile /opt/keyfile --oplogSize 10240 --replSet rs3 --directoryperdb --port 27017 --shardsvr
    # command: mongod --oplogSize 10240 --replSet rs3 --directoryperdb --port 27017 --shardsvr
    volumes:
      - /mnt/dataset/databases/mongo_meinian/meinian_rs3_209:/data/db
      - ./keyfile:/opt/keyfile
    healthcheck:
      test: "exit 0"
    ports:
      - "27023:27017"
    restart:
      always
    container_name:
      meinian_rs3_node


  router:
    image: mongo:3.4
    command: mongos --keyFile /opt/keyfile --configdb configrs/192.168.1.208:27020,192.168.1.209:27020,192.168.1.210:27020
    # command: mongos --configdb configrs/ServerA:27020,ServerB:27020,ServerC:27020
    ports:
      - "27024:27017"
    volumes:
      - /mnt/dataset/databases/mongo_meinian/meinian_router209:/data/db
      - ./keyfile:/opt/keyfile
    healthcheck:
      test: "exit 0"
    restart:
      always
    container_name:
      meinian_router

node3

version: '2.2'
services:
  configsrv:
    image: mongo:3.4
    command: mongod --keyFile /opt/keyfile --oplogSize 1024 --replSet configrs --port 27017 --configsvr --wiredTigerCacheSizeGB 5
    # command: mongod  --oplogSize 1024 --replSet configrs --port 27017 --configsvr --wiredTigerCacheSizeGB 5
    volumes:
      - /mnt/dataset/databases/mongo_meinian/configsrv210:/data/db
      - ./keyfile:/opt/keyfile
    #network_mode: "host"
    healthcheck:
      test: "exit 0"
    ports:
      - "27020:27017"
    restart:
      always
    container_name:
      meinian_configsrv


  rs1_arbiter:
    image: mongo:3.4
    command: mongod --keyFile /opt/keyfile --oplogSize 10240 --replSet rs1 --directoryperdb --port 27017 --shardsvr
    # command: mongod  --oplogSize 10240 --replSet rs1 --directoryperdb --port 27017 --shardsvr
    volumes:
      - /mnt/dataset/databases/mongo_meinian/meinian_rs1_arb:/data/db
      - ./keyfile:/opt/keyfile
    # network_mode: "host"
    healthcheck:
      test: "exit 0"
    ports:
      - "27021:27017"
    restart:
      always
    container_name:
      meinian_rs1_arbiter


  rs2_node:
    image: mongo:3.4
    command: mongod --keyFile /opt/keyfile --oplogSize 10240 --replSet rs2 --directoryperdb --port 27017 --shardsvr
    # command: mongod   --oplogSize 10240 --replSet rs2 --directoryperdb --port 27017 --shardsvr
    volumes:
      - /mnt/dataset/databases/mongo_meinian/meinian_rs2_210:/data/db
      - ./keyfile:/opt/keyfile
    # network_mode: "host"
    healthcheck:
      test: "exit 0"
    ports:
      - "27022:27017"
    restart:
      always
    container_name:
      meinian_rs2_node

  rs3_node:
    image: mongo:3.4
    command: mongod --keyFile /opt/keyfile --oplogSize 10240 --replSet rs3 --directoryperdb --port 27017 --shardsvr
    # command: mongod  --oplogSize 10240 --replSet rs3 --directoryperdb --port 27017 --shardsvr
    volumes:
      - /mnt/dataset/databases/mongo_meinian/meinian_rs3_210:/data/db
      - ./keyfile:/opt/keyfile
    # network_mode: "host"
    healthcheck:
      test: "exit 0"
    ports:
      - "27023:27017"
    restart:
      always
    container_name:
      meinian_rs3_node


  router:
    image: mongo:3.4
    command: mongos --keyFile /opt/keyfile --configdb configrs/192.168.1.208:27020,192.168.1.209:27020,192.168.1.210:27020
    # command: mongos  --configdb configrs/ServerA:27020,ServerB:27020,ServerC:27020
    ports:
      - "27024:27017"
    volumes:
      - /mnt/dataset/databases/mongo_meinian/meinian_router210:/data/db
      - ./keyfile:/opt/keyfile
    # network_mode: "host"
    healthcheck:
      test: "exit 0"
    restart:
      always
    container_name:
      meinian_router
2. Generate the keyfile

Before running docker-compose, generate a shared keyfile, copy it to all three nodes, and mount it into the containers; the --keyFile option is then added when starting mongod and mongos. The keyfile is used for internal authentication between cluster members, so only instances that share the same keyfile can join the cluster.

openssl rand -base64 741 > keyfile
chmod 600 keyfile
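
The same keyfile must be present on all three nodes. A minimal sketch of distributing it, assuming SSH access between the nodes; the destination path is a placeholder for whatever directory holds the docker-compose.yml on each node:

# copy the keyfile next to the compose file on the other nodes, preserving the 600 mode
scp -p keyfile node2:/path/to/compose-dir/keyfile
scp -p keyfile node3:/path/to/compose-dir/keyfile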
3. Initialize the config servers

Run the command below on all three nodes to start the containers, then begin configuring the cluster, starting with the config servers.

docker-compose up  -d
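
Once the containers are up on a node, you can check them and open a mongo shell inside the config server container; a hedged example on node1, using the container name from the compose file above:

docker ps --filter name=meinian
docker exec -it meinian_configsrv mongo --port 27017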

First connect with the mongo shell to any config container (node1 is used as the example here) and create the administrative users. MongoDB's built-in roles fall into the following categories:

Database user roles
read: read-only access to data
readWrite: read and write access to data
Database administration roles
dbAdmin: perform administrative operations on the current db
dbOwner: perform any operation on the current db
userAdmin: manage users on the current db
Backup and restore roles
backup
restore
Cross-database roles
readAnyDatabase: read data on any database
readWriteAnyDatabase: read and write data on any database
userAdminAnyDatabase: manage users on any database
dbAdminAnyDatabase: administer any database
Cluster administration roles
clusterAdmin: the highest level of cluster administration
clusterManager: manage and monitor the cluster
clusterMonitor: monitor the cluster
hostManager: manage and monitor servers
Superuser
root: superuser with full access

Here we create one user with the user-administration role and one with the superuser role:

use admin

db.createUser({user:"admin", pwd: "123456", , roles:[{role: "userAdminAnyDatabase", db: "admin"}]})

db.createUser({user:"root", pwd: "123456", , roles:[{role: "root", db: "admin"}]})

Initialize the replica set:

rs.initiate(
  {
    _id: "configrs",
    configsvr: true,
    members: [
      { _id : 0, host : "node1:27020" },
      { _id : 1, host : "node2:27020" },
      { _id : 2, host : "node3:27020" }
    ]
  }
)
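
rs.status() can be used to confirm that the config replica set came up; its members array should list the three config servers on port 27020 with one PRIMARY and two SECONDARY states:

rs.status()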
4. Initialize the shard nodes

All three shards are initialized with the same steps; rs1 is used as the example.
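
Connect to the rs1 member first; a hedged example on node1 using docker exec with the container name from the compose file:

docker exec -it meinian_rs1_node mongo --port 27017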

use admin

db.createUser({user:"admin", pwd: "123456", , roles:[{role: "userAdminAnyDatabase", db: "admin"}]})

db.createUser({user:"root", pwd: "123456", , roles:[{role: "root", db: "admin"}]})

Initialize the replica set:

rs.initiate(
  {
    _id : "rs1",
    members: [
      { _id : 0, host : "node1:27021" },
      { _id : 1, host : "node2:27021" }
    ]
  }
)

Add the arbiter node:

rs.addArb("node3:27021")

The other shards follow the same steps as rs1.

5. Configure the router nodes

First start the router containers on all three nodes, then connect with the mongo shell to any router node and add the three shards to the cluster.
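
A hedged example of opening a shell against the local mongos container on node1:

docker exec -it meinian_router mongo --port 27017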

use admin
# the username is one registered on the config servers
db.auth("<username>", "<password>")
# add the three shards to the cluster
sh.addShard("rs1/node1:27021")
sh.addShard("rs2/node3:27022")
sh.addShard("rs3/node2:27023")

Before sharding a database, run the following commands against the target database:

sh.enableSharding("<database>")
sh.shardCollection("<database>.<collection>", { <key> : <direction> } )
Building the keepalived cluster

In the MongoDB cluster above, a mongos instance runs on each of the three nodes, and connecting to any one of them works. To keep the application from depending on a single mongos instance, which would be a single point of failure, keepalived is deployed in front of them.

1. Configuration files

node1: keepalived master node
keepalived.conf

vrrp_instance VI_1 {
    state MASTER                                          # master node
    interface enp6s0                                      # network interface to use
    virtual_router_id 51
    priority 100
    advert_int 1
    authentication {
        auth_type PASS
        auth_pass 1111
    }
    virtual_ipaddress {
        # 192.168.1.222
        192.168.1.101/24 dev enp6s0 label enp6s0:1        # virtual IP (the label must start with the interface name)
    }
}

virtual_server 192.168.1.101 27000 {
    delay_loop 6
    lb_algo rr
    lb_kind NAT
    nat_mask 255.255.255.0
    persistence_timeout 50
    protocol TCP
    real_server 192.168.1.208 27024 {                     # real server IP + port (mongos)
        weight 1
        TCP_CHECK {
            connect_timeout 8
            nb_get_retry 3
            delay_before_retry 3
            connect_port 27024                            # health-check the mongos port
        }
    }
    real_server 192.168.1.209 27024 {
        weight 1
        TCP_CHECK {
            connect_timeout 8
            nb_get_retry 3
            delay_before_retry 3
            connect_port 27024
        }
    }
    real_server 192.168.1.210 27024 {
        weight 1
        TCP_CHECK {
            connect_timeout 8
            nb_get_retry 3
            delay_before_retry 3
            connect_port 27024
        }
    }
}

docker-compose.yml

version: '2.1'

services:
  keepalived_master:
    image:  arcts/keepalived                                  # note which image is used
    restart: always
    # privileged: true
    cap_add:
      - NET_ADMIN
    volumes:
      - ./keepalived.conf:/keepalived.conf
    network_mode: "host"
    healthcheck:
      test: "exit 0"
    environment:
      KEEPALIVED_AUTOCONF: "false"
      KEEPALIVED_CONF: "/etc/keepalived/keepalived.conf"
      KEEPALIVED_CMD: /usr/sbin/keepalived -n -l -f /keepalived.conf

node2: keepalived backup node
keepalived.conf

vrrp_instance VI_1 {
    state BACKUP                                          # backup node
    interface enp6s0
    virtual_router_id 51
    priority 90                                           # lower than the master so node1 is preferred
    advert_int 1
    authentication {
        auth_type PASS
        auth_pass 1111
    }
    virtual_ipaddress {
        # 192.168.1.222
        192.168.1.101/24 dev enp6s0 label enp6s0:1
    }
}

virtual_server 192.168.1.101 27000 {
    delay_loop 6
    lb_algo rr
    lb_kind NAT
    nat_mask 255.255.255.0
    persistence_timeout 50
    protocol TCP
    real_server 192.168.1.208 27024 {
        weight 1
        TCP_CHECK {
            connect_timeout 8
            nb_get_retry 3
            delay_before_retry 3
            connect_port 27024
        }
    }
    real_server 192.168.1.209 27024 {
        weight 1
        TCP_CHECK {
            connect_timeout 8
            nb_get_retry 3
            delay_before_retry 3
            connect_port 27024
        }
    }
    real_server 192.168.1.210 27024 {
        weight 1
        TCP_CHECK {
            connect_timeout 8
            nb_get_retry 3
            delay_before_retry 3
            connect_port 27024
        }
    }
}

docker-compose.yml

version: '2.1'

services:
  keepalived_back_up:
    image:  arcts/keepalived                                  # note which image is used
    restart: always
    # privileged: true
    cap_add:
      - NET_ADMIN
    volumes:
      - ./keepalived.conf:/keepalived.conf
    network_mode: "host"
    healthcheck:
      test: "exit 0"
    environment:
      KEEPALIVED_AUTOCONF: "false"
      KEEPALIVED_CONF: "/etc/keepalived/keepalived.conf"
      KEEPALIVED_CMD: /usr/sbin/keepalived -n -l -f /keepalived.conf

After starting the containers, check whether the virtual IP came up:

ip a

If the enp6s0 interface now shows two IP addresses (the host address plus the virtual IP), the setup succeeded.
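
As an end-to-end check, an application host can connect to mongos through the virtual IP and the virtual_server port defined in keepalived.conf (192.168.1.101:27000 in this configuration); keepalived/LVS then forwards the connection to one of the three mongos instances on port 27024. Note that with lb_kind NAT the return traffic must pass back through the keepalived node, so this is best tested from a separate client machine:

mongo --host 192.168.1.101 --port 27000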

This completes the keepalived + MongoDB cluster setup. This post does not go deep into the underlying principles or the meaning of every parameter, because the official documentation already explains them very well; if you want to dig deeper, be sure to read it.
