一.拉取zookeeper, kafka, kafka-manager鏡像
docker pull wyh1791/zookeeper-arm64v8
docker pull wyh1791/kafka-arm64v8
docker pull wyh1791/kafka-manager-arm64v8:v2
二.創建必要文件及文件夾
1.建立根文件夾
cd /
mkdir kafka
2.kafka節點文件夾
cd /kafka
mkdir kafka1
mkdir kafka2
mkdir kafka3
3.zookeeper文件夾
mkdir zookeeper1
mkdir zookeeper2
mkdir zookeeper3
4.zookeeper配置文件
mkdir zooConfig
cd zooConfig
mkdir zoo1
mkdir zoo2
mkdir zoo3
5.在zoo1,zoo2,zoo3中分別創建myid文件，並分別寫入對應的id數字，如zoo1中的myid中寫入1
echo "1" > ./zoo1/myid
echo "2" > ./zoo2/myid
echo "3" > ./zoo3/myid
6.創建zoo配置文件zoo.cfg
touch zoo.cfg
填寫如下內容
# The number of milliseconds of each tick
tickTime=2000
# The number of ticks that the initial
# synchronization phase can take
initLimit=10
# The number of ticks that can pass between
# sending a request and getting an acknowledgement
syncLimit=5
# the directory where the snapshot is stored.
# do not use /tmp for storage, /tmp here is just
# example sakes.
dataDir=/data
dataLogDir=/datalog
# the port at which the clients will connect
clientPort=2181
# the maximum number of client connections.
# increase this if you need to handle more clients
#maxClientCnxns=60
#
# Be sure to read the maintenance section of the
# administrator guide before turning on autopurge.
#
# http://zookeeper.apache.org/doc/current/zookeeperAdmin.html#sc_maintenance
#
# The number of snapshots to retain in dataDir
autopurge.snapRetainCount=3
# Purge task interval in hours
# Set to "0" to disable auto purge feature
autopurge.purgeInterval=1
# Ensemble members: server.N=host:peerPort:electionPort
# N must match the number written to each node's myid file.
# (no space after '=' — keeps the entries consistent with every
# other key and avoids surprises in tools that do not trim values)
server.1=zoo1:2888:3888
server.2=zoo2:2888:3888
server.3=zoo3:2888:3888
三.創建網絡
docker network create --driver bridge --subnet 172.23.0.0/25 --gateway 172.23.0.1 zookeeper_network
四.創建docker-compose.yml文件
cd /kafka
touch docker-compose.yml
填寫內(nèi)容
- 備注: 暴露在外的地址 必須修改為你的樹莓派ip
version: '2'
services:
  zoo1:
    image: wyh1791/zookeeper-arm64v8
    restart: always  # restart the container automatically
    container_name: zoo1
    hostname: zoo1
    ports:
      - "2181:2181"
    volumes:
      - "./zooConfig/zoo.cfg:/conf/zoo.cfg"  # shared zookeeper config
      - "./zookeeper1/data:/data"
      - "./zookeeper1/datalog:/datalog"
    environment:
      ZOO_MY_ID: "1"  # must match ./zooConfig/zoo1/myid
      ZOO_SERVERS: server.1=zoo1:2888:3888 server.2=zoo2:2888:3888 server.3=zoo3:2888:3888
    networks:
      default:
        ipv4_address: 172.23.0.11
  zoo2:
    image: wyh1791/zookeeper-arm64v8
    restart: always
    container_name: zoo2
    hostname: zoo2
    ports:
      - "2182:2181"
    volumes:
      - "./zooConfig/zoo.cfg:/conf/zoo.cfg"
      - "./zookeeper2/data:/data"
      - "./zookeeper2/datalog:/datalog"
    environment:
      ZOO_MY_ID: "2"
      ZOO_SERVERS: server.1=zoo1:2888:3888 server.2=zoo2:2888:3888 server.3=zoo3:2888:3888
    networks:
      default:
        ipv4_address: 172.23.0.12
  zoo3:
    image: wyh1791/zookeeper-arm64v8
    restart: always
    container_name: zoo3
    hostname: zoo3
    ports:
      - "2183:2181"
    volumes:
      - "./zooConfig/zoo.cfg:/conf/zoo.cfg"
      - "./zookeeper3/data:/data"
      - "./zookeeper3/datalog:/datalog"
    environment:
      ZOO_MY_ID: "3"
      ZOO_SERVERS: server.1=zoo1:2888:3888 server.2=zoo2:2888:3888 server.3=zoo3:2888:3888
    networks:
      default:
        ipv4_address: 172.23.0.13
  kafka1:
    image: wyh1791/kafka-arm64v8  # image
    restart: always
    container_name: kafka1
    hostname: kafka1
    ports:
      - "9092:9092"
      # - "9999:9999"
    environment:
      KAFKA_ADVERTISED_LISTENERS: PLAINTEXT://192.168.2.104:9092  # externally advertised address — change to YOUR host IP
      KAFKA_ADVERTISED_HOST_NAME: kafka1
      KAFKA_HOST_NAME: kafka1
      KAFKA_ZOOKEEPER_CONNECT: zoo1:2181,zoo2:2181,zoo3:2181
      KAFKA_ADVERTISED_PORT: "9092"  # externally advertised port
      KAFKA_BROKER_ID: "0"  # unique per broker
      KAFKA_LISTENERS: PLAINTEXT://0.0.0.0:9092
      # JMX_PORT: "9999"  # jmx
    volumes:
      - "/etc/localtime:/etc/localtime"
      - "./kafka1/logs:/kafka"
    links:
      - zoo1
      - zoo2
      - zoo3
    networks:
      default:
        ipv4_address: 172.23.0.14
  kafka2:
    image: wyh1791/kafka-arm64v8
    restart: always
    container_name: kafka2
    hostname: kafka2
    ports:
      - "9093:9092"
      # - "9998:9999"
    environment:
      KAFKA_ADVERTISED_LISTENERS: PLAINTEXT://192.168.2.104:9093
      KAFKA_ADVERTISED_HOST_NAME: kafka2
      KAFKA_HOST_NAME: kafka2
      KAFKA_ZOOKEEPER_CONNECT: zoo1:2181,zoo2:2181,zoo3:2181
      KAFKA_ADVERTISED_PORT: "9093"
      KAFKA_BROKER_ID: "1"
      KAFKA_LISTENERS: PLAINTEXT://0.0.0.0:9092
      # JMX_PORT: "9999"
    volumes:
      - "/etc/localtime:/etc/localtime"
      - "./kafka2/logs:/kafka"
    links:
      - zoo1
      - zoo2
      - zoo3
    networks:
      default:
        ipv4_address: 172.23.0.15
  kafka3:
    image: wyh1791/kafka-arm64v8
    restart: always
    container_name: kafka3
    hostname: kafka3
    ports:
      - "9094:9092"
      # - "9997:9999"
    environment:
      KAFKA_ADVERTISED_LISTENERS: PLAINTEXT://192.168.2.104:9094
      KAFKA_ADVERTISED_HOST_NAME: kafka3
      KAFKA_HOST_NAME: kafka3
      KAFKA_ZOOKEEPER_CONNECT: zoo1:2181,zoo2:2181,zoo3:2181
      KAFKA_ADVERTISED_PORT: "9094"
      KAFKA_BROKER_ID: "2"
      KAFKA_LISTENERS: PLAINTEXT://0.0.0.0:9092
      # JMX_PORT: "9999"
    volumes:
      - "/etc/localtime:/etc/localtime"
      - "./kafka3/logs:/kafka"
    links:
      - zoo1
      - zoo2
      - zoo3
    networks:
      default:
        ipv4_address: 172.23.0.16
  kafka-manager:
    image: wyh1791/kafka-manager-arm64v8:v2
    restart: always
    container_name: kafka-manager
    hostname: kafka-manager
    ports:
      - "9000:9000"
    links:
      - kafka1
      - kafka2
      - kafka3
      - zoo1
      - zoo2
      - zoo3
    environment:
      ZK_HOSTS: zoo1:2181,zoo2:2181,zoo3:2181
      KAFKA_BROKERS: kafka1:9092,kafka2:9093,kafka3:9094
      APPLICATION_SECRET: letmein
      KAFKA_MANAGER_AUTH_ENABLED: "true"  # enable authentication
      KAFKA_MANAGER_USERNAME: "admin"  # login user
      KAFKA_MANAGER_PASSWORD: "admin"  # login password
      KM_ARGS: -Djava.net.preferIPv4Stack=true
    networks:
      default:
        ipv4_address: 172.23.0.10
networks:
  default:
    external:
      name: zookeeper_network
五.啟停集群
1.啟動集群
docker-compose up -d
2.停止集群
docker-compose stop
3.單個節點停止
docker rm -f zoo1
六.查看zookeeper集群是否正常
docker exec -it zoo1 bash
bin/zkServer.sh status # mode 為leader或follower正常
七.驗證集群
1.新建topic
docker exec -it kafka1 bash
cd /opt/kafka/bin/
./kafka-topics.sh --create --zookeeper zoo1:2181 --replication-factor 1 --partitions 3 --topic test001
./kafka-topics.sh --list --zookeeper zoo1:2181
./kafka-topics.sh --list --zookeeper zoo2:2181
./kafka-topics.sh --list --zookeeper zoo3:2181
2.生產消息
./kafka-console-producer.sh --broker-list kafka1:9092,kafka2:9092,kafka3:9092 --topic test001
3.消費消息
./kafka-console-consumer.sh --bootstrap-server kafka1:9092,kafka2:9092,kafka3:9092 --topic test001