1. Write a Dockerfile to build the Kafka image
First, download the Kafka 3.1 tar package (the Scala 2.13 build) from the official website.
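For reference, one possible way to fetch and prepare the tarball (the URL is an assumption based on the Apache archive layout; the Dockerfile below expects an uncompressed .tar, so the downloaded .tgz is gunzipped):

wget https://archive.apache.org/dist/kafka/3.1.0/kafka_2.13-3.1.0.tgz
gunzip kafka_2.13-3.1.0.tgz   # produces kafka_2.13-3.1.0.tar, the name used in the Dockerfile

With the tarball in place, write the Dockerfile and the entrypoint script. The Dockerfile is as follows: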
FROM openjdk:17.0.2-slim-bullseye
RUN groupadd -r -g 999 kafka && useradd -r -g kafka -u 999 kafka
ENV GOSU_VERSION 1.14
RUN set -eux; \
savedAptMark="$(apt-mark showmanual)"; \
apt-get update; \
apt-get install -y --no-install-recommends ca-certificates dirmngr gnupg wget; \
rm -rf /var/lib/apt/lists/*; \
dpkgArch="$(dpkg --print-architecture | awk -F- '{ print $NF }')"; \
wget -O /usr/local/bin/gosu "https://github.com/tianon/gosu/releases/download/$GOSU_VERSION/gosu-$dpkgArch"; \
wget -O /usr/local/bin/gosu.asc "https://github.com/tianon/gosu/releases/download/$GOSU_VERSION/gosu-$dpkgArch.asc"; \
export GNUPGHOME="$(mktemp -d)"; \
gpg --batch --keyserver hkps://keys.openpgp.org --recv-keys B42F6819007F00F88E364FD4036A9C25BF357DD4; \
gpg --batch --verify /usr/local/bin/gosu.asc /usr/local/bin/gosu; \
gpgconf --kill all; \
rm -rf "$GNUPGHOME" /usr/local/bin/gosu.asc; \
apt-mark auto '.*' > /dev/null; \
[ -z "$savedAptMark" ] || apt-mark manual $savedAptMark > /dev/null; \
apt-get purge -y --auto-remove -o APT::AutoRemove::RecommendsImportant=false; \
chmod +x /usr/local/bin/gosu; \
gosu --version; \
gosu nobody true
COPY kafka_2.13-3.1.0.tar /usr/src/
COPY docker-entrypoint.sh /usr/local/bin/
ENV KAFKA_HOME /usr/local/kafka
ENV PATH $KAFKA_HOME/bin:$PATH
RUN set -eux; \
mkdir -p /usr/local/kafka; \
chmod +x /usr/local/bin/*.sh;\
mkdir /tmp/kafka-logs && chown kafka:kafka /tmp/kafka-logs;\
chown kafka:kafka /usr/local/kafka;\
tar -xf /usr/src/kafka_2.13-3.1.0.tar -C /usr/local/kafka --strip-components=1;
VOLUME /tmp/kafka-logs
WORKDIR /tmp/kafka-logs
ENTRYPOINT ["docker-entrypoint.sh"]
EXPOSE 9092
CMD ["/usr/local/kafka/config/server.properties"]
The entrypoint script, docker-entrypoint.sh, is as follows:
#!/bin/sh
# If started as root, make sure the data directory is owned by the kafka user,
# then re-exec this script as the kafka user via gosu
if [ "$(id -u)" = '0' ]; then
    find . \! -user kafka -exec chown kafka '{}' +
    exec gosu kafka "$0" "$@"
fi
# Start the broker with the server.properties path passed in as CMD
exec kafka-server-start.sh "$@"
This image starts Kafka with the configuration file at /usr/local/kafka/config/server.properties. To use a different configuration, simply mount your own file over that path.
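A minimal sketch of what that mapping looks like with plain docker run (the container name and local file name are just illustrative, and the mounted config still needs to point at a reachable ZooKeeper):

docker run -d --name kafka_test -p 9092:9092 \
  -v $(pwd)/server.properties:/usr/local/kafka/config/server.properties \
  kafka:3.1.0

The compose file in the next section applies the same idea through its volumes entries.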
At this point the build-context directory should contain the files referenced by the COPY instructions above, roughly like this:
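.
├── Dockerfile
├── docker-entrypoint.sh
└── kafka_2.13-3.1.0.tar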
From a terminal, switch to that directory and run the build command to create the Kafka image:
docker build -f Dockerfile --tag kafka:3.1.0 .
The result:
docker images
REPOSITORY TAG IMAGE ID CREATED SIZE
kafka 3.1.0 68a527be4461 2 days ago 595MB
2. Write a docker-compose file to start the Kafka cluster
To create a three-node Kafka cluster, we first need three configuration files. The image we just built uses the configuration file baked into the image, so we simply mount our own file over it, one per broker.
Three settings in each configuration file need attention (a combined example follows this list):
1. broker.id is the id of each Kafka broker within the cluster and must be unique per node:
broker.id=0
2. advertised.listeners is the address the broker registers in ZooKeeper. It should be set to the host machine's IP; otherwise Kafka registers the container's internal IP and clients outside the containers cannot connect. Since each broker is published on a different host port in the compose file below, the advertised port should also match that broker's mapped port (9092, 9093, 9094):
advertised.listeners=PLAINTEXT://192.168.0.108:9092
3. Set the ZooKeeper address. Because all containers join the same network, the ZooKeeper container name can be used directly:
zookeeper.connect=zookeeper:2181
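Putting these together, broker_0's server.properties might look roughly like the sketch below, based on the stock config shipped with Kafka with all other defaults left alone; broker_1 and broker_2 would differ only in broker.id and the advertised port:

broker.id=0
listeners=PLAINTEXT://0.0.0.0:9092
advertised.listeners=PLAINTEXT://192.168.0.108:9092
log.dirs=/tmp/kafka-logs
zookeeper.connect=zookeeper:2181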
With the configuration files in place, write the docker-compose file:
version: '2.2'
services:
  kafka_0:
    image: kafka:3.1.0
    container_name: kafka_0
    ports:
      - "9092:9092"
    volumes:
      - ./kafka_config/broker_0/server.properties:/usr/local/kafka/config/server.properties
    networks:
      - kafka_net
  kafka_1:
    image: kafka:3.1.0
    container_name: kafka_1
    ports:
      - "9093:9092"
    volumes:
      - ./kafka_config/broker_1/server.properties:/usr/local/kafka/config/server.properties
    networks:
      - kafka_net
  kafka_2:
    image: kafka:3.1.0
    container_name: kafka_2
    ports:
      - "9094:9092"
    volumes:
      - ./kafka_config/broker_2/server.properties:/usr/local/kafka/config/server.properties
    networks:
      - kafka_net
  zookeeper_test:
    image: zookeeper:latest
    container_name: zookeeper
    ports:
      - "2181:2181"
    networks:
      - kafka_net
networks:
  kafka_net:
    driver: bridge
With that, the cluster can be started with a single command:
docker-compose -f kafka.yaml up -d
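To confirm that everything came up, you can check the containers and the broker registrations in ZooKeeper (commands are illustrative; zkCli.sh ships with the official zookeeper image):

docker-compose -f kafka.yaml ps
docker exec zookeeper zkCli.sh -server localhost:2181 ls /brokers/ids

The second command should print [0, 1, 2] once all three brokers have registered.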
Next, create a topic and test it with some code.
The topic can be created from outside the container via docker exec:
docker exec -it kafka_1 kafka-topics.sh --create --topic quickstart --partitions 4 --replication-factor 3 --bootstrap-server localhost:9092
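Optionally, describe the topic to check that its 4 partitions and 3 replicas are spread across the brokers (the same kafka-topics.sh tool, invoked the same way):

docker exec -it kafka_1 kafka-topics.sh --describe --topic quickstart --bootstrap-server localhost:9092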
Now write some code to test it.
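Both classes below reference a Constant helper class that is not shown here; the following is a minimal sketch with values matching the setup above (the class and field names are assumptions):

public class Constant {
    // Host ports published for the three brokers in the compose file (assumed values)
    public static final String BROKER_LIST = "192.168.0.108:9092,192.168.0.108:9093,192.168.0.108:9094";
    // Topic created with kafka-topics.sh above
    public static final String TOPIC = "quickstart";
}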
Message producer:
import java.util.Properties;

import org.apache.kafka.clients.producer.KafkaProducer;
import org.apache.kafka.clients.producer.ProducerConfig;
import org.apache.kafka.clients.producer.ProducerRecord;
import org.apache.kafka.common.serialization.StringSerializer;

public class ProducerFastStart {
    public static void main(String[] args) {
        Properties properties = new Properties();
        properties.put(ProducerConfig.KEY_SERIALIZER_CLASS_CONFIG, StringSerializer.class.getName());
        properties.put(ProducerConfig.VALUE_SERIALIZER_CLASS_CONFIG, StringSerializer.class.getName());
        properties.put(ProducerConfig.BOOTSTRAP_SERVERS_CONFIG, Constant.BROKER_LIST);
        KafkaProducer<String, String> producer = new KafkaProducer<>(properties);
        try {
            for (int i = 0; i < 100; i++) {
                ProducerRecord<String, String> record = new ProducerRecord<>(Constant.TOPIC, "hello, kafka:" + i);
                // send() is asynchronous; get() blocks until the broker acknowledges the record
                producer.send(record).get();
            }
        } catch (Exception e) {
            e.printStackTrace();
        }
        producer.close();
    }
}
Message consumer:
import java.time.Duration;
import java.util.Collections;
import java.util.Properties;

import org.apache.kafka.clients.consumer.ConsumerConfig;
import org.apache.kafka.clients.consumer.ConsumerRecord;
import org.apache.kafka.clients.consumer.ConsumerRecords;
import org.apache.kafka.clients.consumer.KafkaConsumer;
import org.apache.kafka.common.serialization.StringDeserializer;

public class ConsumerFastStart {
    public static final String groupId = "group.demo";

    public static void main(String[] args) {
        Properties properties = new Properties();
        properties.put(ConsumerConfig.KEY_DESERIALIZER_CLASS_CONFIG, StringDeserializer.class.getName());
        properties.put(ConsumerConfig.VALUE_DESERIALIZER_CLASS_CONFIG, StringDeserializer.class.getName());
        properties.put(ConsumerConfig.BOOTSTRAP_SERVERS_CONFIG, Constant.BROKER_LIST);
        properties.put(ConsumerConfig.GROUP_ID_CONFIG, groupId);
        properties.put(ConsumerConfig.ENABLE_AUTO_COMMIT_CONFIG, true);
        properties.put(ConsumerConfig.AUTO_OFFSET_RESET_CONFIG, "earliest");
        KafkaConsumer<String, String> consumer = new KafkaConsumer<>(properties);
        // Subscribe to the topic
        consumer.subscribe(Collections.singletonList(Constant.TOPIC));
        // Poll for messages in a loop
        while (true) {
            ConsumerRecords<String, String> records =
                    consumer.poll(Duration.ofMillis(5000));
            for (ConsumerRecord<String, String> record : records) {
                System.out.println("========================================");
                System.out.println(record.value());
                System.out.println(record.topic());
                System.out.println(record.partition());
                System.out.println(record.offset());
                System.out.println("========================================");
            }
        }
    }
}
That completes the setup and deployment. The files and code for this section have been uploaded to GitHub; feedback and discussion are welcome.