KAFKA

Kafka official documentation: kafka.apache.org/documentati…

一、Quick start with Kafka

  1. Download

    https://www.apache.org/dyn/closer.cgi?path=/kafka/2.7.0/kafka_2.13-2.7.0.tgz
    
  2. Extract the archive

    tar -xzf kafka_2.13-2.7.0.tgz
    cd kafka_2.13-2.7.0
    
  3. Start ZooKeeper

    # start ZooKeeper in the foreground
    bin/zookeeper-server-start.sh config/zookeeper.properties
    # or run ZooKeeper in the background (daemon mode)
    bin/zookeeper-server-start.sh -daemon config/zookeeper.properties
    
  4. Start Kafka

    # start the Kafka broker in the foreground
    bin/kafka-server-start.sh config/server.properties
    # or run the broker in the background (daemon mode)
    bin/kafka-server-start.sh -daemon config/server.properties
    
  5. Create a topic (the same topic is used in steps 6 and 7)

    bin/kafka-topics.sh --create --topic quickstart-events --bootstrap-server localhost:9092
    
  6. Write events to the topic

    bin/kafka-console-producer.sh --topic quickstart-events --bootstrap-server localhost:9092
    This is my first event
    This is my second event
    
  7. Read the events

    bin/kafka-console-consumer.sh --topic quickstart-events --from-beginning --bootstrap-server localhost:9092
    
  8. Possible issues

    If a start script fails because the default JVM heap cannot be allocated (kafka-server-start.sh defaults to a 1 GB heap), lower the heap by setting the KAFKA_HEAP_OPTS environment variable (for example "-Xmx512M -Xms512M") before running it.

二、Reading Kafka data from Java

Integrating Kafka with Spring Boot
  1. Add the spring-kafka dependency

    <!--kafka-->
    <dependency>
        <groupId>org.springframework.kafka</groupId>
        <artifactId>spring-kafka</artifactId>
    </dependency>
    
  2. Configuration (application.yml); the producer settings here are also picked up by the KafkaTemplate sketch at the end of this section

    spring:
      kafka:
        bootstrap-servers: localhost:9092
        template:
          default-topic: test_market_topic
        producer:
          acks: all
          retries: 3
          batch-size: 1048576
          properties:
            linger:
              ms: 100
          buffer-memory: 16777216
          key-serializer: org.apache.kafka.common.serialization.StringSerializer
          value-serializer: org.apache.kafka.common.serialization.StringSerializer
        listener:
          concurrency: 3
          # manual acks with a batch listener: the @KafkaListener method below receives a List of records plus an Acknowledgment
          ack-mode: manual
          type: batch
        consumer:
          group-id: lilou_report_alarm_comsumer
          bootstrap-servers: IP1:9092,IP2:9092,IP3:9092
          # offsets are committed manually via Acknowledgment.acknowledge()
          enable-auto-commit: false
          auto-offset-reset: latest
          properties:
            session:
              timeout:
                ms: 30000
          auto-commit-interval: 1000 # only takes effect when enable-auto-commit is true
          key-deserializer: org.apache.kafka.common.serialization.StringDeserializer
          value-deserializer: org.apache.kafka.common.serialization.StringDeserializer
          max-poll-records: 20
    
  3. Consume with the @KafkaListener annotation

    // Batch listener: because listener.type is "batch" and ack-mode is "manual" above,
    // the method receives the whole polled batch plus an Acknowledgment for manual commits.
    @KafkaListener(topics = "lilou_internal_message_info")
    public void onMessage(List<ConsumerRecord<String, String>> records, Acknowledgment acknowledgment) {
        // process the records
        log.info("batch size: {}", records.size());
        // commit the offsets once the batch has been handled
        acknowledgment.acknowledge();
    }
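
  For the sending side, the spring.kafka.producer settings and template.default-topic above are picked up by the auto-configured KafkaTemplate. A minimal sketch, assuming String keys and values (the MarketProducer class name is illustrative, not from the original):

    import org.springframework.kafka.core.KafkaTemplate;
    import org.springframework.stereotype.Component;

    @Component
    public class MarketProducer {

        private final KafkaTemplate<String, String> kafkaTemplate;

        public MarketProducer(KafkaTemplate<String, String> kafkaTemplate) {
            this.kafkaTemplate = kafkaTemplate;
        }

        // sendDefault(...) publishes to spring.kafka.template.default-topic (test_market_topic)
        public void send(String key, String payload) {
            kafkaTemplate.sendDefault(key, payload);
        }

        // send(...) targets an explicit topic instead of the default one
        public void sendTo(String topic, String payload) {
            kafkaTemplate.send(topic, payload);
        }
    }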
      

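  Besides the Spring listener, the same topic can be read with the plain kafka-clients KafkaConsumer (pulled in transitively by spring-kafka). A minimal sketch; the group id is a placeholder:

    import java.time.Duration;
    import java.util.Collections;
    import java.util.Properties;

    import org.apache.kafka.clients.consumer.ConsumerConfig;
    import org.apache.kafka.clients.consumer.ConsumerRecord;
    import org.apache.kafka.clients.consumer.ConsumerRecords;
    import org.apache.kafka.clients.consumer.KafkaConsumer;
    import org.apache.kafka.common.serialization.StringDeserializer;

    public class PlainConsumer {

        public static void main(String[] args) {
            Properties props = new Properties();
            props.put(ConsumerConfig.BOOTSTRAP_SERVERS_CONFIG, "localhost:9092");
            props.put(ConsumerConfig.GROUP_ID_CONFIG, "plain-consumer-demo"); // placeholder group id
            props.put(ConsumerConfig.KEY_DESERIALIZER_CLASS_CONFIG, StringDeserializer.class.getName());
            props.put(ConsumerConfig.VALUE_DESERIALIZER_CLASS_CONFIG, StringDeserializer.class.getName());
            props.put(ConsumerConfig.ENABLE_AUTO_COMMIT_CONFIG, "false"); // commit manually, as in the Spring setup

            try (KafkaConsumer<String, String> consumer = new KafkaConsumer<>(props)) {
                consumer.subscribe(Collections.singleton("lilou_internal_message_info"));
                while (true) {
                    // poll a batch of records, then commit the offsets once they are processed
                    ConsumerRecords<String, String> records = consumer.poll(Duration.ofMillis(1000));
                    for (ConsumerRecord<String, String> record : records) {
                        System.out.printf("offset=%d key=%s value=%s%n", record.offset(), record.key(), record.value());
                    }
                    consumer.commitSync();
                }
            }
        }
    }
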
三、Common commands

    # list consumer groups (recent Kafka releases removed --zookeeper from this tool; address the broker instead)
    bin/kafka-consumer-groups.sh --bootstrap-server 127.0.0.1:9092 --list
    # list topics (the --zookeeper form is legacy; current releases use --bootstrap-server)
    bin/kafka-topics.sh --zookeeper 127.0.0.1:2181 --list
    # create a topic with 1 partition and a replication factor of 1
    bin/kafka-topics.sh --create --zookeeper localhost:2181 --replication-factor 1 --partitions 1 --topic test
    # console producer and consumer against a broker listening on port 8090
    bin/kafka-console-producer.sh --broker-list PLAINTEXT://localhost:8090 --topic topic
    bin/kafka-console-consumer.sh --bootstrap-server PLAINTEXT://localhost:8090 --topic topic --from-beginning
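
The same administrative operations are also available from Java through the kafka-clients AdminClient (bundled transitively with spring-kafka). A minimal sketch; the broker address and topic name are placeholders:

    import java.util.Collections;
    import java.util.Properties;
    import java.util.Set;

    import org.apache.kafka.clients.admin.AdminClient;
    import org.apache.kafka.clients.admin.AdminClientConfig;
    import org.apache.kafka.clients.admin.NewTopic;

    public class TopicAdmin {

        public static void main(String[] args) throws Exception {
            Properties props = new Properties();
            props.put(AdminClientConfig.BOOTSTRAP_SERVERS_CONFIG, "localhost:9092");

            try (AdminClient admin = AdminClient.create(props)) {
                // create a topic with 1 partition and replication factor 1 (fails if it already exists)
                admin.createTopics(Collections.singleton(new NewTopic("test", 1, (short) 1))).all().get();

                // list topics, the programmatic counterpart of kafka-topics.sh --list
                Set<String> topics = admin.listTopics().names().get();
                topics.forEach(System.out::println);

                // list consumer groups, the counterpart of kafka-consumer-groups.sh --list
                admin.listConsumerGroups().all().get()
                     .forEach(g -> System.out.println(g.groupId()));
            }
        }
    }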