Installing a Kafka & Zookeeper Cluster with Docker-Compose

Kafka image: wurstmeister/kafka:2.13-2.8.1

Zookeeper image: zookeeper:latest

Create a Docker network

# Create the Docker network
docker network create --driver bridge --subnet 10.10.14.0/24 --gateway 10.10.14.1 zookeeper_network

# List Docker networks
docker network ls
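
If you want to double-check that the subnet and gateway were applied as intended, docker network inspect prints the full configuration (optional):

# Optional: inspect the network and confirm the 10.10.14.0/24 subnet and gateway
docker network inspect zookeeper_network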

Zookeeper cluster

Directory structure:

├── docker-compose.yml
├── zoo1
│   ├── data
│   └── datalog
├── zoo2
│   ├── data
│   └── datalog
└── zoo3
    ├── data
    └── datalog
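
A minimal sketch for creating this layout up front, assuming the project directory is /opt/zookeeper (matching the volume paths used in the compose file below):

# Assumed project root /opt/zookeeper; adjust if your compose file lives elsewhere
mkdir -p /opt/zookeeper/zoo{1,2,3}/{data,datalog}
cd /opt/zookeeper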

Edit the docker-compose.yml configuration file:

vim docker-compose.yml

version: '3.8'

networks:
  default:
    external:
      name: zookeeper_network

services:
  zoo1:
    image: zookeeper:latest
    container_name: zoo1
    restart: always
    hostname: zoo1
    ports:
      - 2181:2181
    volumes:
      - "/opt/zookeeper/zoo1/data:/data"
      - "/opt/zookeeper/zoo1/datalog:/datalog"
    environment:
      ZOO_MY_ID: 1
      ZOO_SERVERS: server.1=0.0.0.0:2888:3888;2181 server.2=zoo2:2888:3888;2181 server.3=zoo3:2888:3888;2181
  zoo2:
    image: zookeeper:latest
    container_name: zoo2
    restart: always
    hostname: zoo2
    ports:
      - 2182:2181
    volumes:
      - "/opt/zookeeper/zoo2/data:/data"
      - "/opt/zookeeper/zoo2/datalog:/datalog"
    environment:
      ZOO_MY_ID: 2
      ZOO_SERVERS: server.1=zoo1:2888:3888;2181 server.2=0.0.0.0:2888:3888;2181 server.3=zoo3:2888:3888;2181
  zoo3:
    image: zookeeper:latest
    container_name: zoo3
    restart: always
    hostname: zoo3
    ports:
      - 2183:2181
    volumes:
      - "/opt/zookeeper/zoo3/data:/data"
      - "/opt/zookeeper/zoo3/datalog:/datalog"
    environment:
      ZOO_MY_ID: 3
      ZOO_SERVERS: server.1=zoo1:2888:3888;2181 server.2=zoo2:2888:3888;2181 server.3=0.0.0.0:2888:3888;2181
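
After starting the stack with docker-compose up -d, you can check that the three nodes have formed a quorum. A minimal check, assuming zkServer.sh is on the PATH inside the official zookeeper image (it is in current tags):

# One node should report Mode: leader and the other two Mode: follower
docker exec -it zoo1 zkServer.sh status
docker exec -it zoo2 zkServer.sh status
docker exec -it zoo3 zkServer.sh status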

Kafka cluster

The directory structure is as follows:

.
├── data1
├── data2
├── data3
└── docker-compose.yml
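
Likewise, a sketch for preparing the Kafka data directories, assuming /opt/kafka as the project root (matching the volume mounts below):

# Assumed project root /opt/kafka; adjust to your environment
mkdir -p /opt/kafka/data{1,2,3}
cd /opt/kafka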

Edit the docker-compose.yml configuration file:

vim docker-compose.yml

version: '3.8'

networks:
  default:
    external:
      name: zookeeper_network

services:
  kafka1:
    image: wurstmeister/kafka:2.13-2.8.1
    restart: unless-stopped
    container_name: kafka1
    hostname: kafka1
    ports:
      - "9092:9092"
    external_links:
      - zoo1
      - zoo2
      - zoo3
    environment:
      KAFKA_BROKER_ID: 1
      KAFKA_LISTENERS: PLAINTEXT://0.0.0.0:9092
      KAFKA_ADVERTISED_LISTENERS: PLAINTEXT://10.10.0.106:9092 ## host machine IP
      KAFKA_ADVERTISED_HOST_NAME: kafka1
      KAFKA_ADVERTISED_PORT: 9092
      KAFKA_ZOOKEEPER_CONNECT: "zoo1:2181,zoo2:2181,zoo3:2181"
      KAFKA_LOG_RETENTION_HOURS: 120
      KAFKA_MESSAGE_MAX_BYTES: 10000000
      KAFKA_REPLICA_FETCH_MAX_BYTES: 10000000
      KAFKA_GROUP_MAX_SESSION_TIMEOUT_MS: 60000
      KAFKA_NUM_PARTITIONS: 3
      KAFKA_DELETE_RETENTION_MS: 1000
    volumes:
      - "/opt/kafka/data1/:/kafka"
  kafka2:
    image: wurstmeister/kafka:2.13-2.8.1
    restart: unless-stopped
    container_name: kafka2
    hostname: kafka2
    ports:
      - "9093:9092"
    external_links:
      - zoo1
      - zoo2
      - zoo3
    environment:
      KAFKA_BROKER_ID: 2
      KAFKA_LISTENERS: PLAINTEXT://0.0.0.0:9092
      KAFKA_ADVERTISED_LISTENERS: PLAINTEXT://10.10.0.106:9093 ## host machine IP
      KAFKA_ADVERTISED_HOST_NAME: kafka2
      KAFKA_ADVERTISED_PORT: 9093
      KAFKA_ZOOKEEPER_CONNECT: "zoo1:2181,zoo2:2181,zoo3:2181"
      KAFKA_LOG_RETENTION_HOURS: 120
      KAFKA_MESSAGE_MAX_BYTES: 10000000
      KAFKA_REPLICA_FETCH_MAX_BYTES: 10000000
      KAFKA_GROUP_MAX_SESSION_TIMEOUT_MS: 60000
      KAFKA_NUM_PARTITIONS: 3
      KAFKA_DELETE_RETENTION_MS: 1000
    volumes:
      - "/opt/kafka/data2/:/kafka"
  kafka3:
    image: wurstmeister/kafka:2.13-2.8.1
    restart: unless-stopped
    container_name: kafka3
    hostname: kafka3
    ports:
      - "9094:9092"
    external_links:
      - zoo1
      - zoo2
      - zoo3
    environment:
      KAFKA_BROKER_ID: 3
      KAFKA_LISTENERS: PLAINTEXT://0.0.0.0:9092
      KAFKA_ADVERTISED_LISTENERS: PLAINTEXT://10.10.0.106:9094 ## host machine IP
      KAFKA_ADVERTISED_HOST_NAME: kafka3
      KAFKA_ADVERTISED_PORT: 9094
      KAFKA_ZOOKEEPER_CONNECT: "zoo1:2181,zoo2:2181,zoo3:2181"
      KAFKA_LOG_RETENTION_HOURS: 120
      KAFKA_MESSAGE_MAX_BYTES: 10000000
      KAFKA_REPLICA_FETCH_MAX_BYTES: 10000000
      KAFKA_GROUP_MAX_SESSION_TIMEOUT_MS: 60000
      KAFKA_NUM_PARTITIONS: 3
      KAFKA_DELETE_RETENTION_MS: 1000
    volumes:
      - "/opt/kafka/data3/:/kafka"
  kafka-ui: # Kafka web management UI
    image: provectuslabs/kafka-ui
    restart: unless-stopped
    container_name: kafka-ui
    hostname: kafka-ui
    ports:
      - "9000:8080"
    links: # link to the containers created by this compose file
      - kafka1
      - kafka2
      - kafka3
    environment:
      SERVER_SERVLET_CONTEXT_PATH: /kafka-ui # UI is served at host:port/kafka-ui
      AUTH_TYPE: "LOGIN_FORM"
      SPRING_SECURITY_USER_NAME: suPerAdmin
      SPRING_SECURITY_USER_PASSWORD: suPerAdmin
      KAFKA_CLUSTERS_0_NAME: 10.10.0.106-kafka
      KAFKA_CLUSTERS_0_BOOTSTRAPSERVERS: kafka1:9092,kafka2:9093,kafka3:9094
      KAFKA_CLUSTERS_0_PROPERTIES_SECURITY_PROTOCOL: SASL_PLAINTEXT
      KAFKA_CLUSTERS_0_PROPERTIES_SASL_MECHANISM: PLAIN
      KAFKA_CLUSTERS_0_PROPERTIES_SASL_JAAS_CONFIG: 'org.apache.kafka.common.security.plain.PlainLoginModule required username="admin" password="admin";'
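
Once the stack is up, one way to confirm that all three brokers registered with ZooKeeper is to query the /brokers/ids znode from one of the zoo containers. This is a sketch assuming zkCli.sh accepts a command as trailing arguments (supported in ZooKeeper 3.5+, which the official image ships):

# The returned list should contain the broker ids [1, 2, 3]
docker exec -it zoo1 zkCli.sh -server localhost:2181 ls /brokers/ids

The kafka-ui console should then be reachable at http://10.10.0.106:9000/kafka-ui with the credentials configured above. Note that the SASL_PLAINTEXT client properties only take effect against brokers that actually have SASL enabled; the plain setup above does not, see the references at the end for a SASL-enabled variant.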

Verify the running state

# Start the stack
docker-compose up -d

# Remove the stack (and its volumes)
docker-compose down -v

# Rebuild and update the stack
docker-compose up -d --build

# Stop
docker-compose stop

# Start
docker-compose start

# Restart
docker-compose restart

# Check container status
docker ps

# Test a producer
docker exec -it kafka1 /bin/bash
kafka-console-producer.sh --broker-list kafka1:9092,kafka2:9092,kafka3:9092 --topic test

# Test a consumer
docker exec -it kafka2 /bin/bash
kafka-console-consumer.sh --bootstrap-server kafka1:9092,kafka2:9092,kafka3:9092 --topic test --from-beginning
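
If you prefer to verify replication explicitly rather than relying on an auto-created topic, a sketch using the kafka-topics.sh tool bundled in the image, following the same exec-into-the-container pattern as above (the topic name test is only an example):

# Create a topic replicated across all three brokers, then inspect its partition/ISR layout
docker exec -it kafka1 /bin/bash
kafka-topics.sh --create --topic test --partitions 3 --replication-factor 3 --bootstrap-server kafka1:9092
kafka-topics.sh --describe --topic test --bootstrap-server kafka1:9092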

References:

Installing a Kafka cluster with docker-compose and solving the problem of Kafka inside Docker being unreachable from outside

Setting up Kafka with SASL username/password authentication in Docker

Configuring password-authenticated Kafka with docker-compose