单一服务器部署kafka集群 - housekeeper-software/tech GitHub Wiki

使用以下 shell 脚本一键部署（脚本会先清理旧部署，再重新启动集群）

#!/bin/bash
# Deploy a 3-node ZooKeeper ensemble and a 3-broker Kafka cluster on a
# single host via docker-compose. Any previous deployment is torn down
# first, including its persisted data under /usr/local/{kafka,zookeeper}.

# If the compose project directory exists, kill and remove its
# containers; otherwise create the directory for the first deployment.
teardown_or_create() {
    local dir="$1"
    if [ -d "$dir" ]; then
        # Subshell so the working directory is restored automatically.
        (cd "./$dir" && { docker-compose kill; docker-compose rm -f; })
    else
        mkdir "$dir"
    fi
}

teardown_or_create kafka
teardown_or_create zookeeper

# Wipe persisted broker/zookeeper state from previous runs.
rm -rf /usr/local/kafka /usr/local/zookeeper

# Pre-create each broker's mounted config directory and install the
# log4j configuration it expects (mounted at /opt/kafka/config).
for broker in broker1 broker2 broker3; do
    mkdir -p "/usr/local/kafka/${broker}/config"
    cp ./log4j.properties "/usr/local/kafka/${broker}/config"
done

cp ./zk_docker-compose.yml ./zookeeper/docker-compose.yml
cp ./kafka_docker-compose.yml ./kafka/docker-compose.yml

# Create the shared bridge network once. Use an exact-name match:
# `docker network ls -f name=...` filters by substring and the default
# output includes a header line, so counting lines is unreliable.
if docker network ls --format '{{.Name}}' | grep -qx zoo_kafka; then
    echo "bridge zoo_kafka exists already"
else
    echo "create bridge zoo_kafka"
    docker network create --driver bridge \
        --subnet 172.23.0.0/16 --gateway 172.23.0.1 zoo_kafka
fi

# Start ZooKeeper first so the Kafka brokers can register on boot.
(cd ./zookeeper && docker-compose up -d)
(cd ./kafka && docker-compose up -d)
echo "DONE"


zk_docker-compose.yml

version: '3.1'
services:
  zoo1:
    image: zookeeper:latest  # official ZooKeeper image
    restart: always          # restart automatically on failure
    hostname: zoo1
    container_name: zoo1
    privileged: true
    ports:
      # Quoted to avoid YAML's digit:digit sexagesimal-number trap.
      - "2181:2181"
    volumes:
      - /usr/local/zookeeper/zoo1/data:/data
      # Fixed host-path typo: was "zool" (letter l), which put zoo1's
      # datalog in a stray directory instead of /usr/local/zookeeper/zoo1.
      - /usr/local/zookeeper/zoo1/datalog:/datalog
      - /etc/localtime:/etc/localtime
    environment:
      TZ: Asia/Shanghai
      ZOO_MY_ID: "1"    # node id; must match the server.N entries below
      ZOO_PORT: "2181"  # client port inside the container
      # Full ensemble member list (peer:leader-election;client ports).
      ZOO_SERVERS: "server.1=zoo1:2888:3888;2181 server.2=zoo2:2888:3888;2181 server.3=zoo3:2888:3888;2181"
    networks:
      default:
        ipv4_address: 172.23.0.11

  zoo2:
    image: zookeeper:latest
    restart: always
    hostname: zoo2
    container_name: zoo2
    privileged: true
    ports:
      - "2182:2181"
    volumes:
      - /usr/local/zookeeper/zoo2/data:/data
      - /usr/local/zookeeper/zoo2/datalog:/datalog
      - /etc/localtime:/etc/localtime
    environment:
      TZ: Asia/Shanghai
      ZOO_MY_ID: "2"
      ZOO_PORT: "2181"
      ZOO_SERVERS: "server.1=zoo1:2888:3888;2181 server.2=zoo2:2888:3888;2181 server.3=zoo3:2888:3888;2181"
    networks:
      default:
        ipv4_address: 172.23.0.12

  zoo3:
    image: zookeeper:latest
    restart: always
    hostname: zoo3
    container_name: zoo3
    privileged: true
    ports:
      - "2183:2181"
    volumes:
      - /usr/local/zookeeper/zoo3/data:/data
      - /usr/local/zookeeper/zoo3/datalog:/datalog
      - /etc/localtime:/etc/localtime
    environment:
      TZ: Asia/Shanghai
      ZOO_MY_ID: "3"
      ZOO_PORT: "2181"
      ZOO_SERVERS: "server.1=zoo1:2888:3888;2181 server.2=zoo2:2888:3888;2181 server.3=zoo3:2888:3888;2181"
    networks:
      default:
        ipv4_address: 172.23.0.13

networks:
  default:
    external:  # join the pre-created zoo_kafka bridge network
      name: zoo_kafka

kafka_docker-compose.yml

version: '3.7'
services:
  broker1:
    image: wurstmeister/kafka
    restart: always
    hostname: broker1
    container_name: broker1
    privileged: true
    ports:
      - "9091:9092"
    environment:
      KAFKA_BROKER_ID: "1"
      KAFKA_AUTO_CREATE_TOPICS_ENABLE: "true"
      # topic:partitions:replication-factor, created on startup
      KAFKA_CREATE_TOPICS: "cc_to_user:3:1,cc_from_user:3:1"
      KAFKA_LISTENERS: PLAINTEXT://broker1:9092
      # Address advertised to external clients (host IP + mapped port).
      KAFKA_ADVERTISED_LISTENERS: PLAINTEXT://192.168.180.70:9091
      # NOTE(review): ADVERTISED_HOST_NAME/PORT are the legacy settings;
      # KAFKA_ADVERTISED_LISTENERS takes precedence when both are set.
      KAFKA_ADVERTISED_HOST_NAME: 192.168.180.70
      KAFKA_ADVERTISED_PORT: "9091"
      KAFKA_ZOOKEEPER_CONNECT: zoo1:2181,zoo2:2181,zoo3:2181
    volumes:
      - /usr/local/kafka/broker1/docker.sock:/var/run/docker.sock
      - /usr/local/kafka/broker1/kafka:/kafka
      - /usr/local/kafka/broker1/config:/opt/kafka/config
      - /etc/localtime:/etc/localtime
    external_links:  # containers from the zookeeper compose project
      - zoo1
      - zoo2
      - zoo3
    networks:
      default:
        ipv4_address: 172.23.0.14

  broker2:
    image: wurstmeister/kafka
    restart: always
    hostname: broker2
    container_name: broker2
    privileged: true
    ports:
      - "9092:9092"
    environment:
      KAFKA_BROKER_ID: "2"
      KAFKA_AUTO_CREATE_TOPICS_ENABLE: "true"
      KAFKA_CREATE_TOPICS: "cc_to_user:3:1,cc_from_user:3:1"
      KAFKA_LISTENERS: PLAINTEXT://broker2:9092
      KAFKA_ADVERTISED_LISTENERS: PLAINTEXT://192.168.180.70:9092
      KAFKA_ADVERTISED_HOST_NAME: 192.168.180.70
      KAFKA_ADVERTISED_PORT: "9092"
      KAFKA_ZOOKEEPER_CONNECT: zoo1:2181,zoo2:2181,zoo3:2181
    volumes:
      - /usr/local/kafka/broker2/docker.sock:/var/run/docker.sock
      - /usr/local/kafka/broker2/kafka:/kafka
      - /usr/local/kafka/broker2/config:/opt/kafka/config
      - /etc/localtime:/etc/localtime
    external_links:  # containers from the zookeeper compose project
      - zoo1
      - zoo2
      - zoo3
    networks:
      default:
        ipv4_address: 172.23.0.15

  broker3:
    image: wurstmeister/kafka
    restart: always
    hostname: broker3
    container_name: broker3
    privileged: true
    ports:
      - "9093:9092"
    environment:
      KAFKA_BROKER_ID: "3"
      KAFKA_AUTO_CREATE_TOPICS_ENABLE: "true"
      KAFKA_CREATE_TOPICS: "cc_to_user:3:1,cc_from_user:3:1"
      KAFKA_LISTENERS: PLAINTEXT://broker3:9092
      KAFKA_ADVERTISED_LISTENERS: PLAINTEXT://192.168.180.70:9093
      KAFKA_ADVERTISED_HOST_NAME: 192.168.180.70
      KAFKA_ADVERTISED_PORT: "9093"
      KAFKA_ZOOKEEPER_CONNECT: zoo1:2181,zoo2:2181,zoo3:2181
    volumes:
      - /usr/local/kafka/broker3/docker.sock:/var/run/docker.sock
      - /usr/local/kafka/broker3/kafka:/kafka
      - /usr/local/kafka/broker3/config:/opt/kafka/config
      - /etc/localtime:/etc/localtime
    external_links:  # containers from the zookeeper compose project
      - zoo1
      - zoo2
      - zoo3
    networks:
      default:
        ipv4_address: 172.23.0.16

  kafka-manager:
    image: sheepkiller/kafka-manager:latest
    restart: always
    container_name: kafka-manager
    hostname: kafka-manager
    ports:
      - "9000:9000"
    links:  # containers created by this compose file
      - broker1
      - broker2
      - broker3
    external_links:  # containers from the zookeeper compose project
      - zoo1
      - zoo2
      - zoo3
    environment:
      ZK_HOSTS: zoo1:2181,zoo2:2181,zoo3:2181
      KAFKA_BROKERS: broker1:9092,broker2:9092,broker3:9092
      APPLICATION_SECRET: letmein
      KM_ARGS: -Djava.net.preferIPv4Stack=true
    networks:
      default:
        ipv4_address: 172.23.0.10

networks:
  default:
    external:  # join the pre-created zoo_kafka bridge network
      name: zoo_kafka

log4j.properties

# log4j configuration for the Kafka brokers (console output only)
# Root logger at INFO level, writing to the single console appender.
log4j.rootLogger=INFO, stdout
log4j.appender.stdout=org.apache.log4j.ConsoleAppender
log4j.appender.stdout.layout=org.apache.log4j.PatternLayout
# Pattern: date, level, [logger name] - message, newline
log4j.appender.stdout.layout.ConversionPattern=%d %p [%c] - %m%n
将此文件复制到每个 broker 挂载的 config 目录（即 /usr/local/kafka/brokerN/config，上面的部署脚本已自动完成这一步）。