服务器端SSL证书签发
准备工作:修改/etc/hosts文件,自定义一个hosts名
第一步:为了方便证书的保存管理,这里先专门创建几个目录来保存证书
mkdir -p /opt/tools/ca/{root,server,client,trust}
第二步:生成server.keystore.jks文件(即:生成服务端的keystore文件)
# Generate the broker keystore (server.keystore.jks): an RSA key pair for host
# "kafka-single", valid 365 days; the SAN entry allows hostname verification.
keytool -keystore /opt/tools/ca/server/server.keystore.jks -alias ds-kafka-single -validity 365 -genkey -keypass dscp2021 -keyalg RSA -dname "CN=kafka-single,OU=aspire,O=aspire,L=beijing,S=beijing,C=cn" -storepass dscp2021 -ext SAN=DNS:kafka-single
# Create the CA: a private key (ca-key) plus a self-signed root certificate (ca-cert).
openssl req -new -x509 -keyout /opt/tools/ca/root/ca-key -out /opt/tools/ca/root/ca-cert -days 365 -passout pass:dscp2021 -subj "/C=cn/ST=beijing/L=beijing/O=aspire/OU=aspire/CN=kafka-single"
# Import the CA certificate into the client truststore ...
keytool -keystore /opt/tools/ca/trust/client.truststore.jks -alias CARoot -import -file /opt/tools/ca/root/ca-cert -storepass dscp2021
# ... and into the server truststore (both trust the same root CA).
keytool -keystore /opt/tools/ca/trust/server.truststore.jks -alias CARoot -import -file /opt/tools/ca/root/ca-cert -storepass dscp2021
# Export a certificate signing request (CSR) from the broker keystore.
keytool -keystore /opt/tools/ca/server/server.keystore.jks -alias ds-kafka-single -certreq -file /opt/tools/ca/server/server.cert-file -storepass dscp2021
# Sign the CSR with the CA key, producing the broker's signed certificate.
openssl x509 -req -CA /opt/tools/ca/root/ca-cert -CAkey /opt/tools/ca/root/ca-key -in /opt/tools/ca/server/server.cert-file -out /opt/tools/ca/server/server.cert-signed -days 365 -CAcreateserial -passin pass:dscp2021
# Import the CA certificate into the broker keystore first (to complete the chain) ...
keytool -keystore /opt/tools/ca/server/server.keystore.jks -alias CARoot -import -file /opt/tools/ca/root/ca-cert -storepass dscp2021
# ... then import the signed broker certificate under the original alias.
keytool -keystore /opt/tools/ca/server/server.keystore.jks -alias ds-kafka-single -import -file /opt/tools/ca/server/server.cert-signed -storepass dscp2021
# Spring Boot Kafka client configuration (fragment; nests under `spring:` in the
# full application.yml). Indentation restored to valid YAML.
kafka:
  bootstrap-servers: 192.168.0.119:9095
  producer:
    # Number of retries after a failed send.
    retries: 0
    # When several records target the same partition they are batched together;
    # this is the maximum batch size in bytes.
    batch-size: 16384
    # Total memory (bytes) the producer may use to buffer records.
    buffer-memory: 33554432
    # Key serializer.
    key-serializer: org.apache.kafka.common.serialization.StringSerializer
    # Value serializer.
    value-serializer: org.apache.kafka.common.serialization.StringSerializer
    # acks=0   : producer does not wait for any broker response.
    # acks=1   : producer waits for the partition leader's acknowledgement only.
    # acks=all : producer waits until all replicating nodes have the record.
    acks: 1
  consumer:
    # Auto-commit interval. In Spring Boot 2.x this binds to a Duration and must
    # use the simple format, e.g. 1s, 1m, 2h, 5d.
    auto-commit-interval: 1s
    # Behavior when there is no committed offset (or it is invalid):
    # latest (default): read only records produced after the consumer started;
    # earliest: read the partition from the beginning.
    auto-offset-reset: earliest
    # Disable offset auto-commit; offsets are committed manually to avoid
    # duplicate consumption and data loss.
    enable-auto-commit: false
    # Key deserializer.
    key-deserializer: org.apache.kafka.common.serialization.StringDeserializer
    # Value deserializer.
    value-deserializer: org.apache.kafka.common.serialization.StringDeserializer
  listener:
    # Number of threads running in the listener container.
    concurrency: 5
    # The listener is responsible for acking; each manual ack commits immediately.
    ack-mode: manual_immediate
    missing-topics-fatal: false
  # Basic SSL configuration.
  ssl:
    protocol: SSL
    key-store-location: file:/opt/tools/ca/server/server.keystore.jks
    key-store-password: dscp2021
    key-password: dscp2021
    trust-store-location: file:/opt/tools/ca/trust/client.truststore.jks
    trust-store-password: dscp2021
    key-store-type: JKS
    trust-store-type: JKS
  properties:
    # Empty value disables server hostname verification (the broker-side default
    # became HTTPS in Kafka 2.0.x); required here because the client connects by IP.
    ssl:
      endpoint:
        identification:
          algorithm: ''
    security:
      protocol: SSL
application.yml
Spring Boot + Kafka + SSL
/opt/tools/zk/kafka/bin/kafka-console-producer.sh --broker-list kafka-single:9095 --topic topicOne --producer.config /opt/tools/zk/kafka/kafka-ssl-prod.properties
启动生产者
# Console-producer client config for the SSL listener (kafka-ssl-prod.properties).
bootstrap.servers=kafka-single:9095
security.protocol=SSL
# Paths and passwords must match the keytool/openssl steps above
# (base dir /opt/tools/ca, store password dscp2021); the original values
# (/usr/ca/..., ds1994) did not match the generated stores and would fail to load.
ssl.truststore.location=/opt/tools/ca/trust/server.truststore.jks
ssl.truststore.password=dscp2021
ssl.keystore.password=dscp2021
ssl.keystore.location=/opt/tools/ca/server/server.keystore.jks
vi kafka-ssl-prod.properties
/opt/tools/zk/kafka/bin/kafka-console-consumer.sh --bootstrap-server kafka-single:9095 --topic topicOne --from-beginning --consumer.config /opt/tools/zk/kafka/kafka-ssl.properties
启动消费者
# Console-consumer client config for the SSL listener (kafka-ssl.properties).
group.id=test-group
security.protocol=SSL
# CA truststore, plus the keystore the broker demands via ssl.client.auth=required.
ssl.truststore.location=/opt/tools/ca/trust/server.truststore.jks
ssl.truststore.password=dscp2021
ssl.keystore.location=/opt/tools/ca/server/server.keystore.jks
ssl.keystore.password=dscp2021
vi kafka-ssl.properties
启动&&测试
############################# Server Basics #############################
# SSL configuration for the broker.
# With SSL configured, the legacy `port` and plaintext advertised.listeners
# entries can be commented out.
listeners=SSL://kafka-single:9095
advertised.listeners=SSL://kafka-single:9095
ssl.keystore.location=/opt/tools/ca/server/server.keystore.jks
ssl.keystore.password=dscp2021
ssl.key.password=dscp2021
ssl.truststore.location=/opt/tools/ca/trust/server.truststore.jks
ssl.truststore.password=dscp2021
# Require client certificates (mutual TLS): clients must present a keystore.
ssl.client.auth=required
# NOTE(review): TLSv1 and TLSv1.1 are deprecated; prefer TLSv1.2 only if all
# clients support it — confirm before tightening.
ssl.enabled.protocols=TLSv1.2,TLSv1.1,TLSv1
ssl.keystore.type=JKS
ssl.truststore.type=JKS
# Since Kafka 2.0.x, ssl.endpoint.identification.algorithm defaults to HTTPS,
# i.e. client hostname verification of the broker certificate is enabled.
# To disable verification, set it empty: ssl.endpoint.identification.algorithm=
ssl.endpoint.identification.algorithm=HTTPS
# Use SSL for inter-broker traffic as well (default: security.inter.broker.protocol=PLAINTEXT).
security.inter.broker.protocol=SSL
broker.id=0
############################# Socket Server Settings #############################
num.network.threads=3
num.io.threads=8
socket.send.buffer.bytes=102400
socket.receive.buffer.bytes=102400
socket.request.max.bytes=104857600
############################# Log Basics #############################
log.dirs=/usr/data/kafka
num.partitions=1
num.recovery.threads.per.data.dir=1
############################# Internal Topic Settings #############################
# Replication factor 1: single-broker setup, no redundancy for internal topics.
offsets.topic.replication.factor=1
transaction.state.log.replication.factor=1
transaction.state.log.min.isr=1
############################# Log Retention Policy #############################
log.retention.hours=168
log.segment.bytes=1073741824
log.retention.check.interval.ms=300000
############################# Zookeeper #############################
zookeeper.connect=localhost:2181
zookeeper.connection.timeout.ms=6000
############################# Group Coordinator Settings #############################
group.initial.rebalance.delay.ms=0