kafka 常用操作 — common operations cheat sheet

command reference

# cluster
# Print the broker/tooling version.
kafka-server-start.sh --version
# List brokers and their supported API versions; grep keeps only the "id" lines.
kafka-broker-api-versions.sh --bootstrap-server 127.0.0.1:9092  | grep id

# topic
# List all topics on the cluster.
kafka-topics.sh --bootstrap-server 127.0.0.1:9092 --list
# Create a topic with broker-default partitions/replication.
kafka-topics.sh --bootstrap-server 127.0.0.1:9092 --create --topic test-abx
# Create with explicit partition count and replication factor.
kafka-topics.sh --bootstrap-server 127.0.0.1:9092 --create --topic test-abx --partitions 4 --replication-factor 1
# Create with per-topic config overrides (max message size, retention in ms).
kafka-topics.sh --bootstrap-server 127.0.0.1:9092 --create --topic test-abx --config max.message.bytes=64000 --config retention.ms=10
# Show partitions, leaders, replicas and ISR for the topic.
kafka-topics.sh --bootstrap-server 127.0.0.1:9092 --describe --topic test-abx
# Increase partition count (partitions can only grow, never shrink).
kafka-topics.sh --bootstrap-server 127.0.0.1:9092 --alter --topic test-abx --partitions 5
# Delete the topic (see FAQ: requires delete.topic.enable=true to take effect).
kafka-topics.sh --bootstrap-server 127.0.0.1:9092 --delete --topic test-abx

# config topic
# Add/override dynamic per-topic configs without recreating the topic.
kafka-configs.sh --bootstrap-server 127.0.0.1:9092 --alter --entity-type topics --entity-name test-abx --add-config max.message.bytes=128000
kafka-configs.sh --bootstrap-server 127.0.0.1:9092 --alter --entity-type topics --entity-name test-abx --add-config retention.ms=10
kafka-configs.sh --bootstrap-server 127.0.0.1:9092 --alter --entity-type topics --entity-name test-abx --add-config retention.bytes=1024
kafka-configs.sh --bootstrap-server 127.0.0.1:9092 --alter --entity-type topics --entity-name test-abx --add-config segment.ms=86400000
# Remove an override so the topic falls back to the broker default.
kafka-configs.sh --bootstrap-server 127.0.0.1:9092 --alter --entity-type topics --entity-name test-abx --delete-config max.message.bytes
# Show the topic's current (non-default) configs.
kafka-configs.sh --bootstrap-server 127.0.0.1:9092 --describe --entity-type topics --entity-name test-abx
# cluster config (cluster-wide broker defaults)
kafka-configs.sh --bootstrap-server 127.0.0.1:9092 --describe --entity-type brokers --entity-default
# config of the single broker with broker.id=1
kafka-configs.sh --bootstrap-server 127.0.0.1:9092 --describe --entity-type brokers --entity-name 1

# console-producer
# NOTE: --broker-list was deprecated in Kafka 2.5 and removed in 3.0;
# --bootstrap-server is the replacement (same host:port list).
# Interactive mode: each line typed becomes one message.
kafka-console-producer.sh --bootstrap-server 127.0.0.1:9092 --topic test-abx
# Batch mode: each line of the file "text" becomes one message.
kafka-console-producer.sh --bootstrap-server 127.0.0.1:9092 --topic test-abx < text

# console-consumer
# Consume from the earliest offset as a member of a named consumer group.
kafka-console-consumer.sh --bootstrap-server 127.0.0.1:9092 --topic test-abx --from-beginning --group test-abx-group
# Read at most 2 messages, giving up after 1s of no data (handy for smoke tests).
kafka-console-consumer.sh --bootstrap-server 127.0.0.1:9092 --topic test-abx --from-beginning --max-messages 2 --timeout-ms 1000
# Read a single partition starting from an explicit offset.
kafka-console-consumer.sh --bootstrap-server 127.0.0.1:9092 --topic test-abx --partition 0 --offset 0
# Decode committed offsets from the internal __consumer_offsets topic
# ($ is escaped so the shell passes the inner-class name through literally).
kafka-console-consumer.sh --bootstrap-server 127.0.0.1:9092 --topic __consumer_offsets --partition 15 --formatter "kafka.coordinator.group.GroupMetadataManager\$OffsetsMessageFormatter"

# consumer-group
# --topic accepts topic:partition to target a single partition.
kafka-consumer-groups.sh --bootstrap-server 127.0.0.1:9092 --list
# Show per-partition current offset, log-end offset and lag for the group.
kafka-consumer-groups.sh --bootstrap-server 127.0.0.1:9092 --describe --group test-abx-group
# Delete the group (only succeeds when the group has no active members).
kafka-consumer-groups.sh --bootstrap-server 127.0.0.1:9092 --delete --group test-abx-group
# Reset offsets (group must be inactive); --execute applies, omit it to dry-run.
kafka-consumer-groups.sh --bootstrap-server 127.0.0.1:9092 --group test-abx-group --topic test-abx --reset-offsets --to-earliest --execute
# Reset only partition 2 of the topic to the latest offset.
kafka-consumer-groups.sh --bootstrap-server 127.0.0.1:9092 --group test-abx-group --topic test-abx:2 --reset-offsets --to-latest --execute
kafka-consumer-groups.sh --bootstrap-server 127.0.0.1:9092 --group test-abx-group --topic test-abx --reset-offsets --to-current --execute
# Reset every topic the group consumes to an absolute offset.
kafka-consumer-groups.sh --bootstrap-server 127.0.0.1:9092 --group test-abx-group --all-topics --reset-offsets --to-offset 500000 --execute


# Append handy Kafka shortcut aliases to ~/.bashrc.
# Quoted heredoc ('EOF') keeps the content literal, same bytes as the
# original echo (leading and trailing blank line included).
cat >> ~/.bashrc <<'EOF'

alias kfkgroup='kafka-consumer-groups.sh --bootstrap-server 127.0.0.1:9092 --describe --group'
alias kfkgrouplist='kafka-consumer-groups.sh --bootstrap-server 127.0.0.1:9092 --list'
alias kfktopic='kafka-topics.sh --bootstrap-server 127.0.0.1:9092 --describe --topic'
alias kfktopiclist='kafka-topics.sh --bootstrap-server 127.0.0.1:9092 --list'

EOF

kafka-UI

# docker
# Run provectus kafka-ui on :8080; cluster config comes from the mounted YAML
# (written by the snippet below) and DYNAMIC_CONFIG_ENABLED also allows editing
# clusters from the web UI at runtime.
docker run -it -p 8080:8080 -e DYNAMIC_CONFIG_ENABLED=true -v /tmp/kafka_ui_conf.yaml:/etc/kafkaui/dynamic_config.yaml provectuslabs/kafka-ui

# Write the kafka-ui dynamic config: two clusters, the first with schema
# registry auth and JMX metrics on port 9997. (Content is a quoted string:
# do not add comments inside it or they end up in the YAML file.)
echo "
kafka:
  clusters:
    - name: cvm-test-mall
      bootstrapServers: localhost:29091
      schemaRegistry: http://localhost:8085
      schemaRegistryAuth:
        username: username
        password: password
#     schemaNameTemplate: "%s-value"
      metrics:
        port: 9997
        type: JMX
    - name: tx-uat-crm
      bootstrapServers: 10.223.12.17:9092
" > /tmp/kafka_ui_conf.yaml
# Alternative UI: kafka-manager (CMAK), configured via ZooKeeper instead of
# bootstrap servers.
docker run -it -d --name kafka-manager -p 9000:9000 -e ZK_HOSTS="10.243.0.24:2181" sheepkiller/kafka-manager:latest

FAQ

  1. kafka java.io.IOException: Too many open files
# Count file descriptors held by the Kafka process.
# pgrep replaces the fragile `ps aux | grep kafka | grep -v grep | awk` chain
# (no self-match to filter out); head -n 1 guards against multiple matches,
# which would break `lsof -p` with a space-separated pid list.
kfkpid=$(pgrep -f kafka | head -n 1)
lsof -p "$kfkpid" | wc -l

# Show the current user's resource limits (look at "open files").
ulimit -a

# Temporary fix: raise the per-process open-file limit for this shell only.
ulimit -n 102400

# Permanent fix via limits.conf. The domain field must be '*' (all users) —
# the original '_' was a markdown-mangled asterisk and is invalid syntax.
echo -e "* soft nofile 102400\n* hard nofile 102400\n" >> /etc/security/limits.conf

# System-wide maximum number of open file handles.
cat /proc/sys/fs/file-max

# Raise the system-wide limit (usually not required).
echo -e "fs.file-max = 6553600\n" >> /etc/sysctl.conf
  2. kafka java.io.IOException: No space left on device
# Fix by tightening log retention time/size in server.properties
# (these are broker-wide defaults; per-topic overrides via kafka-configs.sh).
# The minimum age of a log file to be eligible for deletion
log.retention.hours=168

# A size-based retention policy for logs.
log.retention.bytes=5000000

# The maximum size of a log segment file. When this size is reached a new log segment will be created.
log.segment.bytes=50000000
  3. kafka.common.MessageSizeTooLargeException
# producer.properties
# Enable compression on the (legacy Scala) producer so large payloads shrink
# below the broker limit.
compression.codec
# Comma-separated list of topics to compress.
# Fixed typo: the property is "compressed.topics", not "commpressed.topics".
compressed.topics

# server.properties
# Invariant: the broker's max accepted message size must not exceed what
# follower replicas can fetch, or replication stalls on oversized messages.
message.max.bytes < replica.fetch.max.bytes

# consumer.properties
# Invariant: consumers must be able to fetch the largest message the broker
# will accept, or consumption gets stuck on that message.
fetch.message.max.bytes > message.max.bytes
  4. kafka 删除数据
删除 topic 时,正在消费该 topic 的消费组会触发 rebalancing;重启消费组中的客户端也会触发 rebalancing
如果没有设置 delete.topic.enable=true,kafka 命令无法真正删除 topic,只会将其显示为 marked for deletion
删除逻辑:命令删除、物理路径文件删除、zk 上信息删除