1. 機器準備
準備三臺機器,IP分別爲192.168.10.51、192.168.10.52、192.168.10.53;主機名分別爲centos51、centos52、centos53。三臺機器已經準備好docker swarm環境。docker swarm搭建可以參考另一篇文章 docker swarm 集羣搭建。
2. 準備鏡像。
在 https://hub.docker.com/ 上拉取zookeeper、kafka、kafka manager相關鏡像
# Pull the three images used by the stacks below (run on every swarm node,
# since the stacks are deployed with --resolve-image=never).
docker pull zookeeper:3.6.1
docker pull wurstmeister/kafka:2.12-2.5.0
docker pull kafkamanager/kafka-manager:3.0.0.4
3. zookeeper 相關compose準備。
文件名:docker-stack-zookeeper.yml
version: "3.2"

services:
  # ZooKeeper ensemble: three members, one replica each, pinned to a fixed
  # swarm node so ZOO_MY_ID and the bind-mounted /data stay consistent.
  zookeeper-server-a:
    hostname: zookeeper-server-a
    image: zookeeper:3.6.1
    ports:
      - "12181:2181"
    networks:
      swarm-net:
        aliases:
          - zookeeper-server-a
    environment:
      TZ: Asia/Shanghai
      # Must match the "server.N" index in ZOO_SERVERS; quoted so the env
      # value is passed as a string, not a YAML integer.
      ZOO_MY_ID: "1"
      # 2888 = follower/leader port, 3888 = election port, ;2181 = client port.
      ZOO_SERVERS: 'server.1=zookeeper-server-a:2888:3888;2181 server.2=zookeeper-server-b:2888:3888;2181 server.3=zookeeper-server-c:2888:3888;2181'
    volumes:
      - /data/kafka_cluster/zookeeper/data:/data
    deploy:
      replicas: 1
      restart_policy:
        condition: on-failure
      placement:
        constraints: [node.hostname == centos51]
      resources:
        limits:
          # cpus: '1'
          memory: 1GB
        reservations:
          # cpus: '0.2'
          memory: 512M
  zookeeper-server-b:
    hostname: zookeeper-server-b
    image: zookeeper:3.6.1
    ports:
      - "22181:2181"
    networks:
      swarm-net:
        aliases:
          - zookeeper-server-b
    environment:
      TZ: Asia/Shanghai
      ZOO_MY_ID: "2"
      ZOO_SERVERS: 'server.1=zookeeper-server-a:2888:3888;2181 server.2=zookeeper-server-b:2888:3888;2181 server.3=zookeeper-server-c:2888:3888;2181'
    volumes:
      - /data/kafka_cluster/zookeeper/data:/data
    deploy:
      replicas: 1
      restart_policy:
        condition: on-failure
      placement:
        constraints: [node.hostname == centos52]
      resources:
        limits:
          # cpus: '1'
          memory: 1GB
        reservations:
          # cpus: '0.2'
          memory: 512M
  zookeeper-server-c:
    hostname: zookeeper-server-c
    image: zookeeper:3.6.1
    ports:
      - "32181:2181"
    networks:
      swarm-net:
        aliases:
          - zookeeper-server-c
    environment:
      TZ: Asia/Shanghai
      ZOO_MY_ID: "3"
      ZOO_SERVERS: 'server.1=zookeeper-server-a:2888:3888;2181 server.2=zookeeper-server-b:2888:3888;2181 server.3=zookeeper-server-c:2888:3888;2181'
    volumes:
      - /data/kafka_cluster/zookeeper/data:/data
    deploy:
      replicas: 1
      restart_policy:
        condition: on-failure
      placement:
        constraints: [node.hostname == centos53]
      resources:
        limits:
          # cpus: '1'
          memory: 1GB
        reservations:
          # cpus: '0.2'
          memory: 512M

# Pre-existing overlay network shared by all three stacks
# (create with: docker network create -d overlay --attachable swarm-net).
networks:
  swarm-net:
    external:
      name: swarm-net
4. kafka 相關compose 準備
文件名: docker-stack-kafka.yml
version: "3.2"

services:
  # Kafka brokers: three brokers, one replica each, pinned to a fixed swarm
  # node so KAFKA_BROKER_ID and the bind-mounted log dirs stay consistent.
  kafka-server-a:
    hostname: kafka-server-a
    image: wurstmeister/kafka:2.12-2.5.0
    ports:
      - "19092:9092"
    networks:
      swarm-net:
        aliases:
          - kafka-server-a
    environment:
      - TZ=CST-8
      # Advertised as the network alias so in-cluster clients can resolve it.
      - KAFKA_ADVERTISED_HOST_NAME=kafka-server-a
      - HOST_IP=kafka-server-a
      - KAFKA_ADVERTISED_PORT=9092
      - KAFKA_ZOOKEEPER_CONNECT=zookeeper-server-a:2181,zookeeper-server-b:2181,zookeeper-server-c:2181
      - KAFKA_BROKER_ID=0
      # No quotes around the value: in list-style env entries the quotes would
      # be passed literally to the JVM and break the heap flags.
      - KAFKA_HEAP_OPTS=-Xmx512M -Xms16M
    volumes:
      - /data/kafka_cluster/kafka/data:/kafka/kafka-logs-kafka-server-a
      - /data/kafka_cluster/kafka/logs:/opt/kafka/logs
    deploy:
      replicas: 1
      restart_policy:
        condition: on-failure
      placement:
        constraints: [node.hostname == centos51]
      resources:
        limits:
          # cpus: '1'
          memory: 1GB
        reservations:
          # cpus: '0.2'
          memory: 512M
  kafka-server-b:
    hostname: kafka-server-b
    image: wurstmeister/kafka:2.12-2.5.0
    ports:
      - "29092:9092"
    networks:
      swarm-net:
        aliases:
          - kafka-server-b
    environment:
      - TZ=CST-8
      - KAFKA_ADVERTISED_HOST_NAME=kafka-server-b
      - HOST_IP=kafka-server-b
      - KAFKA_ADVERTISED_PORT=9092
      - KAFKA_ZOOKEEPER_CONNECT=zookeeper-server-a:2181,zookeeper-server-b:2181,zookeeper-server-c:2181
      - KAFKA_BROKER_ID=1
      - KAFKA_HEAP_OPTS=-Xmx512M -Xms16M
    volumes:
      - /data/kafka_cluster/kafka/data:/kafka/kafka-logs-kafka-server-b
      - /data/kafka_cluster/kafka/logs:/opt/kafka/logs
    deploy:
      replicas: 1
      restart_policy:
        condition: on-failure
      placement:
        constraints: [node.hostname == centos52]
      resources:
        limits:
          # cpus: '1'
          memory: 1GB
        reservations:
          # cpus: '0.2'
          memory: 512M
  kafka-server-c:
    hostname: kafka-server-c
    image: wurstmeister/kafka:2.12-2.5.0
    ports:
      - "39092:9092"
    networks:
      swarm-net:
        aliases:
          - kafka-server-c
    environment:
      - TZ=CST-8
      - KAFKA_ADVERTISED_HOST_NAME=kafka-server-c
      - HOST_IP=kafka-server-c
      - KAFKA_ADVERTISED_PORT=9092
      - KAFKA_ZOOKEEPER_CONNECT=zookeeper-server-a:2181,zookeeper-server-b:2181,zookeeper-server-c:2181
      - KAFKA_BROKER_ID=2
      - KAFKA_HEAP_OPTS=-Xmx512M -Xms16M
    volumes:
      - /data/kafka_cluster/kafka/data:/kafka/kafka-logs-kafka-server-c
      - /data/kafka_cluster/kafka/logs:/opt/kafka/logs
    deploy:
      replicas: 1
      restart_policy:
        condition: on-failure
      placement:
        constraints: [node.hostname == centos53]
      resources:
        limits:
          # cpus: '1'
          memory: 1GB
        reservations:
          # cpus: '0.2'
          memory: 512M

# Pre-existing overlay network shared by all three stacks.
networks:
  swarm-net:
    external:
      name: swarm-net
5. kafka manager相關compose準備
文件名: docker-stack-kafka-manager.yml
version: "3.2"

services:
  # Kafka Manager (CMAK) web UI, reachable on host port 19000.
  kafka-manager:
    hostname: kafka-manager
    image: kafkamanager/kafka-manager:3.0.0.4
    ports:
      - "19000:9000"
    networks:
      swarm-net:
        aliases:
          - kafka-manager
    environment:
      - ZK_HOSTS=zookeeper-server-a:2181,zookeeper-server-b:2181,zookeeper-server-c:2181
    deploy:
      replicas: 1
      restart_policy:
        condition: on-failure
      placement:
        constraints: [node.hostname == centos51]
      resources:
        limits:
          # cpus: '1'
          memory: 1GB
        reservations:
          # cpus: '0.2'
          memory: 512M

# The key must be "swarm-net" (not "avatar-net") — it has to match the network
# name the service references above, or the stack fails to deploy with an
# "undefined network" error.
networks:
  swarm-net:
    external:
      name: swarm-net
6. 在三臺機器上創建文件映射路徑
# Create the host directories that the compose files bind-mount (run on all three nodes).
mkdir -p {/data/kafka_cluster/zookeeper/data,/data/kafka_cluster/kafka/data,/data/kafka_cluster/kafka/logs}
# chmod, not chown: 777 is a permission mode; chown would try to change the
# owner to UID 777 instead of opening up the permissions.
chmod -R 777 /data/kafka_cluster/
7. 執行compose
一定要按照順序執行,執行成功再執行下一個命令
# Deploy in order — ZooKeeper must be healthy before Kafka, and Kafka before
# Kafka Manager; wait for each stack to come up before running the next.
docker stack deploy -c docker-stack-zookeeper.yml zoo --resolve-image=never --with-registry-auth
docker stack deploy -c docker-stack-kafka.yml kafka --resolve-image=never --with-registry-auth
docker stack deploy -c docker-stack-kafka-manager.yml kafka_manager --resolve-image=never --with-registry-auth