centos7下docker-compose安裝kafka集羣

docker-compose安裝kafka集羣

0. 安裝docker(略)

1. 安裝docker-compose

  1. 下載安裝文件
curl -L https://github.com/docker/compose/releases/download/1.24.0/docker-compose-`uname -s`-`uname -m` -o /usr/local/bin/docker-compose
  1. 添加權限
chmod +x /usr/local/bin/docker-compose
  1. 查看版本
docker-compose --version

2. 安裝zookeeper及kafka鏡像

  1. 查看鏡像
docker search zookeeper
docker search kafka
  1. 下載鏡像
docker pull wurstmeister/zookeeper
docker pull wurstmeister/kafka
docker pull sheepkiller/kafka-manager #管理工具

3. 創建必要文件及文件夾(與docker-compose.yml同一目錄下)

  1. kafka文件夾
mkdir kafka1
mkdir kafka2
mkdir kafka3
  1. zookeeper文件夾
mkdir zookeeper1
mkdir zookeeper2
mkdir zookeeper3
  1. zookeeper配置文件
mkdir zooConfig
cd zooConfig
mkdir zoo1
mkdir zoo2
mkdir zoo3
  1. 在zoo1,zoo2,zoo3中分別創建myid文件,並分別寫入對應的id數字,如zoo1中的myid中寫入1

  2. 創建zoo配置文件zoo.cfg

# The number of milliseconds of each tick
tickTime=2000
# The number of ticks that the initial
# synchronization phase can take
initLimit=10
# The number of ticks that can pass between
# sending a request and getting an acknowledgement
syncLimit=5
# the directory where the snapshot is stored.
# do not use /tmp for storage, /tmp here is just
# example sakes.
dataDir=/opt/zookeeper-3.4.13/data
# the port at which the clients will connect
clientPort=2181
# the maximum number of client connections.
# increase this if you need to handle more clients
#maxClientCnxns=60
#
# Be sure to read the maintenance section of the
# administrator guide before turning on autopurge.
#
# http://zookeeper.apache.org/doc/current/zookeeperAdmin.html#sc_maintenance
#
# The number of snapshots to retain in dataDir
autopurge.snapRetainCount=3
# Purge task interval in hours
# Set to "0" to disable auto purge feature
autopurge.purgeInterval=1
# Ensemble members: server.N=host:peerPort:electionPort.
# No whitespace around '=' (the original had a stray space after '=',
# which is inconsistent with every other entry in this file and is
# mishandled by some config tooling).
# These IPs must match the ipv4_address values assigned in docker-compose.yml.
server.1=172.23.0.11:2888:3888
server.2=172.23.0.12:2888:3888
server.3=172.23.0.13:2888:3888

4. 創建網絡

docker network create --driver bridge --subnet 172.23.0.0/25 --gateway 172.23.0.1  zookeeper_network

5. 創建docker-compose.yml文件

version: '2'

services:

  # ZooKeeper ensemble member 1 (myid=1, fixed IP 172.23.0.11).
  zoo1:
    image: wurstmeister/zookeeper
    restart: always
    container_name: zoo1
    hostname: zoo1
    ports:
    # Host 2181 -> container client port 2181 (matches clientPort in zoo.cfg).
    - "2181:2181"
    volumes:
    - "./zookeeper1/data:/data"
    - "./zookeeper1/datalog:/datalog"
    # All three nodes share one zoo.cfg; the per-node myid file mounted
    # below is what distinguishes this server within the ensemble.
    - "./zooConfig/zoo.cfg:/opt/zookeeper-3.4.13/conf/zoo.cfg"
    - "./zooConfig/zoo1/myid:/opt/zookeeper-3.4.13/data/myid"
    environment:
      ZOO_MY_ID: 1
      ZOO_SERVERS: server.1=zoo1:2888:3888 server.2=zoo2:2888:3888 server.3=zoo3:2888:3888
    networks:
      default:
        # Must agree with server.1 in zoo.cfg.
        ipv4_address: 172.23.0.11

  # ZooKeeper ensemble member 2 (myid=2, fixed IP 172.23.0.12).
  zoo2:
    image: wurstmeister/zookeeper
    restart: always
    container_name: zoo2
    hostname: zoo2
    ports:
    # Host 2182 -> container client port 2181 (each node exposed on its own host port).
    - "2182:2181"
    volumes:
    - "./zookeeper2/data:/data"
    - "./zookeeper2/datalog:/datalog"
    # Shared ensemble config; per-node myid mounted below.
    - "./zooConfig/zoo.cfg:/opt/zookeeper-3.4.13/conf/zoo.cfg"
    - "./zooConfig/zoo2/myid:/opt/zookeeper-3.4.13/data/myid"
    environment:
      ZOO_MY_ID: 2
      ZOO_SERVERS: server.1=zoo1:2888:3888 server.2=zoo2:2888:3888 server.3=zoo3:2888:3888
    networks:
      default:
        # Must agree with server.2 in zoo.cfg.
        ipv4_address: 172.23.0.12

  # ZooKeeper ensemble member 3 (myid=3, fixed IP 172.23.0.13).
  zoo3:
    image: wurstmeister/zookeeper
    restart: always
    container_name: zoo3
    hostname: zoo3
    ports:
    # Host 2183 -> container client port 2181.
    - "2183:2181"
    volumes:
    - "./zookeeper3/data:/data"
    - "./zookeeper3/datalog:/datalog"
    # Shared ensemble config; per-node myid mounted below.
    - "./zooConfig/zoo.cfg:/opt/zookeeper-3.4.13/conf/zoo.cfg"
    - "./zooConfig/zoo3/myid:/opt/zookeeper-3.4.13/data/myid"
    environment:
      # BUG FIX: was 2, duplicating zoo2's server id. Each ensemble member
      # needs a unique id that matches its mounted myid file (zoo3 -> 3);
      # a duplicate id prevents the quorum from forming correctly.
      ZOO_MY_ID: 3
      ZOO_SERVERS: server.1=zoo1:2888:3888 server.2=zoo2:2888:3888 server.3=zoo3:2888:3888
    networks:
      default:
        # Must agree with server.3 in zoo.cfg.
        ipv4_address: 172.23.0.13

  # Kafka broker 0; listens on 9092 both inside the container and on the host.
  kafka1:
    image: wurstmeister/kafka
    restart: always
    container_name: kafka1
    hostname: kafka1
    ports:
    - 9092:9092
    environment:
      # NOTE(review): 172.18.255.9 is outside the compose subnet (172.23.0.0/25),
      # so it is presumably the docker host's external IP — confirm it matches
      # your host, otherwise external clients cannot reach the broker.
      KAFKA_ADVERTISED_LISTENERS: PLAINTEXT://172.18.255.9:9092
      KAFKA_ADVERTISED_HOST_NAME: kafka1
      KAFKA_HOST_NAME: kafka1
      KAFKA_ZOOKEEPER_CONNECT: zoo1:2181,zoo2:2181,zoo3:2181
      KAFKA_ADVERTISED_PORT: 9092
      KAFKA_BROKER_ID: 0
      # Bind on all interfaces inside the container.
      KAFKA_LISTENERS: PLAINTEXT://0.0.0.0:9092
    volumes:
    # Sync container clock/timezone with the host.
    - /etc/localtime:/etc/localtime
    - "./kafka1/logs:/kafka"
    links:
    - zoo1
    - zoo2
    - zoo3
    networks:
      default:
        ipv4_address: 172.23.0.14

  # Kafka broker 1; listens on 9093 inside the container and on the host.
  kafka2:
    image: wurstmeister/kafka
    restart: always
    container_name: kafka2
    hostname: kafka2
    ports:
    # BUG FIX: was 9093:9092 — the broker listens on 9093 inside the
    # container (see KAFKA_LISTENERS), so nothing listens on container
    # port 9092 and the original mapping was dead. Map 9093 -> 9093.
    # Quoted per compose best practice for port mappings.
    - "9093:9093"
    environment:
      # NOTE(review): 172.18.255.9 is outside the compose subnet (172.23.0.0/25),
      # so it is presumably the docker host's external IP — confirm it matches
      # your host, otherwise external clients cannot reach the broker.
      KAFKA_ADVERTISED_LISTENERS: PLAINTEXT://172.18.255.9:9093
      KAFKA_ADVERTISED_HOST_NAME: kafka2
      KAFKA_HOST_NAME: kafka2
      KAFKA_ZOOKEEPER_CONNECT: zoo1:2181,zoo2:2181,zoo3:2181
      KAFKA_ADVERTISED_PORT: 9093
      KAFKA_BROKER_ID: 1
      # Bind on all interfaces inside the container.
      KAFKA_LISTENERS: PLAINTEXT://0.0.0.0:9093
    volumes:
    # Sync container clock/timezone with the host.
    - /etc/localtime:/etc/localtime
    - "./kafka2/logs:/kafka"
    links:
    - zoo1
    - zoo2
    - zoo3
    networks:
      default:
        ipv4_address: 172.23.0.15

  # Kafka broker 2; listens on 9094 inside the container and on the host.
  kafka3:
    image: wurstmeister/kafka
    restart: always
    container_name: kafka3
    hostname: kafka3
    ports:
    # BUG FIX: was 9094:9092 — the broker listens on 9094 inside the
    # container (see KAFKA_LISTENERS), so nothing listens on container
    # port 9092 and the original mapping was dead. Map 9094 -> 9094.
    # Quoted per compose best practice for port mappings.
    - "9094:9094"
    environment:
      # NOTE(review): 172.18.255.9 is outside the compose subnet (172.23.0.0/25),
      # so it is presumably the docker host's external IP — confirm it matches
      # your host, otherwise external clients cannot reach the broker.
      KAFKA_ADVERTISED_LISTENERS: PLAINTEXT://172.18.255.9:9094
      KAFKA_ADVERTISED_HOST_NAME: kafka3
      KAFKA_HOST_NAME: kafka3
      KAFKA_ZOOKEEPER_CONNECT: zoo1:2181,zoo2:2181,zoo3:2181
      KAFKA_ADVERTISED_PORT: 9094
      KAFKA_BROKER_ID: 2
      # Bind on all interfaces inside the container.
      KAFKA_LISTENERS: PLAINTEXT://0.0.0.0:9094
    volumes:
    # Sync container clock/timezone with the host.
    - /etc/localtime:/etc/localtime
    - "./kafka3/logs:/kafka"
    links:
    - zoo1
    - zoo2
    - zoo3
    networks:
      default:
        ipv4_address: 172.23.0.16

  # Web UI for inspecting/managing the Kafka cluster, exposed on host port 9000.
  kafka-manager:
    image: sheepkiller/kafka-manager
    restart: always
    container_name: kafka-manager
    hostname: kafka-manager
    ports:
    - "9000:9000"
    links:
    - kafka1
    - kafka2
    - kafka3
    - zoo1
    - zoo2
    - zoo3
    environment:
      ZK_HOSTS: zoo1:2181,zoo2:2181,zoo3:2181
      # BUG FIX: each broker listens on a different in-container port
      # (kafka1 -> 9092, kafka2 -> 9093, kafka3 -> 9094, per their
      # KAFKA_LISTENERS). The original listed all three as :9092, which
      # does not match any listener on kafka2/kafka3.
      KAFKA_BROKERS: kafka1:9092,kafka2:9093,kafka3:9094
      APPLICATION_SECRET: letmein
      KM_ARGS: -Djava.net.preferIPv4Stack=true
    networks:
      default:
        ipv4_address: 172.23.0.10

# Attach all services to a pre-created bridge network so that static
# ipv4_address values can be assigned. The network must exist before
# `docker-compose up` (created in step 4 with `docker network create
# --driver bridge --subnet 172.23.0.0/25 --gateway 172.23.0.1 zookeeper_network`).
networks:
  default:
    external:
      name: zookeeper_network

6. 啓停集羣

  1. 啓動集羣
docker-compose -f docker-compose.yml up -d
  1. 停止集羣
docker-compose -f docker-compose.yml stop
  1. 單個節點停止
docker rm -f zoo1

7. 查看zookeeper集羣是否正常

docker exec -it zoo1 bash
bin/zkServer.sh status # mode 爲leader或follower正常

8. 創建topic

  1. 驗證,每個list理論上都可以看到新建的topic
docker exec -it kafka1 bash
kafka-topics.sh --create --zookeeper zoo1:2181 --replication-factor 1 --partitions 3 --topic test001
kafka-topics.sh --list --zookeeper zoo1:2181
kafka-topics.sh --list --zookeeper zoo2:2181
kafka-topics.sh --list --zookeeper zoo3:2181
  1. 生產消息
kafka-console-producer.sh --broker-list kafka1:9092,kafka2:9093,kafka3:9094 --topic test001
  1. 消費消息
kafka-console-consumer.sh --bootstrap-server kafka1:9092,kafka2:9093,kafka3:9094 --topic test001 --from-beginning

9. 防火牆開啓相關端口

firewall-cmd --add-port=9000/tcp --permanent
firewall-cmd --reload
發表評論
所有評論
還沒有人評論,想成為第一個評論的人麼? 請在上方評論欄輸入並且點擊發布.
相關文章