Deploying Kafka without ZooKeeper, using the built-in KRaft mode for cluster coordination: an example


Abstract: This article shows a simple deployment of Kafka using its built-in KRaft mode as the coordination layer (no ZooKeeper required). Only a single-node setup is covered here; for a cluster deployment, follow the official documentation. A basic Spring Boot integration example is also included.

Deployment example

Single-node Kafka docker-compose.yml

services:
  broker:
    image: apache/kafka:4.0.0
    container_name: broker
    environment:
      KAFKA_NODE_ID: 1
      KAFKA_PROCESS_ROLES: broker,controller
      KAFKA_LISTENERS: PLAINTEXT://:9092,CONTROLLER://localhost:9093
      KAFKA_ADVERTISED_LISTENERS: PLAINTEXT://192.168.137.131:9092 # replace with the IP/hostname that clients use to reach the broker
      KAFKA_CONTROLLER_LISTENER_NAMES: CONTROLLER
      KAFKA_LISTENER_SECURITY_PROTOCOL_MAP: CONTROLLER:PLAINTEXT,PLAINTEXT:PLAINTEXT
      KAFKA_CONTROLLER_QUORUM_VOTERS: 1@localhost:9093
      KAFKA_OFFSETS_TOPIC_REPLICATION_FACTOR: 1
      KAFKA_TRANSACTION_STATE_LOG_REPLICATION_FACTOR: 1
      KAFKA_TRANSACTION_STATE_LOG_MIN_ISR: 1
      KAFKA_GROUP_INITIAL_REBALANCE_DELAY_MS: 0
      KAFKA_NUM_PARTITIONS: 3
      KAFKA_LOG_DIRS: '/var/lib/kafka/data'
    ports:
      - 9092:9092
    volumes:
      - ./data:/var/lib/kafka/data
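
After starting the broker with docker compose up -d, it is worth checking that it is reachable on the advertised address before wiring up an application. A minimal sketch using the Kafka AdminClient; it assumes the kafka-clients dependency is on the classpath, the class name BrokerCheck is illustrative, and the address must match KAFKA_ADVERTISED_LISTENERS:

import java.util.Properties;
import java.util.Set;
import org.apache.kafka.clients.admin.AdminClient;
import org.apache.kafka.clients.admin.AdminClientConfig;

public class BrokerCheck {

    public static void main(String[] args) throws Exception {
        Properties props = new Properties();
        // Must match KAFKA_ADVERTISED_LISTENERS in docker-compose.yml
        props.put(AdminClientConfig.BOOTSTRAP_SERVERS_CONFIG, "192.168.137.131:9092");

        // Listing topic names succeeds only if the broker is reachable
        try (AdminClient admin = AdminClient.create(props)) {
            Set<String> topics = admin.listTopics().names().get();
            System.out.println("Topics: " + topics);
        }
    }
}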

Kafdrop web UI (docker-compose.yml)

services:
  kafdrop:
    image: obsidiandynamics/kafdrop:latest
    container_name: kafdrop
    environment:
      JVM_OPTS: "-Xms32M -Xmx64M"
      SERVER_SERVLET_CONTEXTPATH: /
      KAFKA_BROKERCONNECT: 192.168.137.131:9092
    ports:
      - 9000:9000

Using the UI

Home page: http://192.168.137.131:9000/ (Kafdrop lists brokers, topics, and consumer groups, and lets you browse messages).


Spring Boot usage example

pom.xml Maven dependencies

    <dependencies>
        <dependency>
            <groupId>org.springframework.boot</groupId>
            <artifactId>spring-boot-starter-web</artifactId>
        </dependency>
        <dependency>
            <groupId>org.springframework.kafka</groupId>
            <artifactId>spring-kafka</artifactId>
        </dependency>
    </dependencies>

Sp3KafkaApplication application entry point

@SpringBootApplication
public class Sp3KafkaApplication {

    public static void main(String[] args) {
        SpringApplication.run(Sp3KafkaApplication.class, args);
    }

}

KafkaProducer (producer)

@Service
public class KafkaProducer {

    private final KafkaTemplate<String, String> kafkaTemplate;

    public KafkaProducer(KafkaTemplate<String, String> kafkaTemplate) {
        this.kafkaTemplate = kafkaTemplate;
    }

    public void sendMessage(String topic, String key, String message) {
        kafkaTemplate.send(topic, key, message);
    }
}
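
kafkaTemplate.send(...) is asynchronous, so sendMessage returns before the broker has confirmed the write. If you want to log the outcome, the returned future can be inspected. A minimal sketch of an extra method that could be added to the KafkaProducer above, assuming spring-kafka 3.x (where send returns a CompletableFuture<SendResult>); the method name sendMessageWithCallback is illustrative:

    public void sendMessageWithCallback(String topic, String key, String message) {
        // send() is asynchronous; whenComplete runs once the broker responds or the send fails
        kafkaTemplate.send(topic, key, message).whenComplete((result, ex) -> {
            if (ex != null) {
                System.err.println("Send failed: " + ex.getMessage());
            } else {
                // RecordMetadata tells us where the record ended up
                System.out.printf("Sent to %s-%d at offset %d%n",
                        result.getRecordMetadata().topic(),
                        result.getRecordMetadata().partition(),
                        result.getRecordMetadata().offset());
            }
        });
    }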

KafkaConsumer (consumer)

@Service
public class KafkaConsumer {

    /**
     * Consume one record at a time
     * @param record the received record
     * @param acknowledgment handle for manually committing the offset
     */
    @KafkaListener(topics = "my-topic", groupId = "my-group")
    public void listen(ConsumerRecord<String, String> record,
                       Acknowledgment acknowledgment) {
        System.out.println("Received message1: " + record);
        if(record.value().equals("ack")){
            acknowledgment.acknowledge();
        }else{
            throw new RuntimeException("test");
        }
    }

    /**
     * Consume records in batches
     * @param records the received batch of records
     * @param acknowledgment handle for manually committing the offset
     */
    @KafkaListener(topics = "my-topic", groupId = "my-group", containerFactory = "kafkaBatchListenerContainerFactory")
    public void listens(List<ConsumerRecord<String, String>> records,
                        Acknowledgment acknowledgment) {
        for (ConsumerRecord<String, String> record : records) {
            System.out.printf("Batch message: Key=%s, Value=%s, Partition=%d, Offset=%d%n",
                    record.key(),
                    record.value(),
                    record.partition(),
                    record.offset());
        }
        acknowledgment.acknowledge();

    }

    @Bean
    public ConcurrentKafkaListenerContainerFactory<String, String> kafkaBatchListenerContainerFactory(
            ConsumerFactory<String, String> consumerFactory) {
        ConcurrentKafkaListenerContainerFactory<String, String> factory =
                new ConcurrentKafkaListenerContainerFactory<>();
        factory.setConsumerFactory(consumerFactory);
        factory.getContainerProperties().setAckMode(ContainerProperties.AckMode.MANUAL);
        factory.setBatchListener(true); // key setting: enables batch mode
        return factory;
    }
}
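
The single-record listener above deliberately throws a RuntimeException for any payload other than "ack", which hands the record to Spring Kafka's error handling and triggers redelivery attempts. To control how many retries happen before the record is logged and skipped, a CommonErrorHandler bean can be declared; recent Spring Boot versions attach it to the auto-configured container factory, while the custom batch factory above would need factory.setCommonErrorHandler(...) called explicitly. A minimal sketch, assuming spring-kafka 3.x; the class name KafkaErrorHandlingConfig and the back-off values are illustrative:

import org.springframework.context.annotation.Bean;
import org.springframework.context.annotation.Configuration;
import org.springframework.kafka.listener.DefaultErrorHandler;
import org.springframework.util.backoff.FixedBackOff;

@Configuration
public class KafkaErrorHandlingConfig {

    // Retry a failed record twice, one second apart, then log and skip it
    @Bean
    public DefaultErrorHandler kafkaErrorHandler() {
        return new DefaultErrorHandler(new FixedBackOff(1000L, 2L));
    }
}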

application.yml configuration

spring:
  kafka:
    bootstrap-servers: 192.168.137.131:9092
    listener:
      ack-mode: manual
    consumer:
      auto-offset-reset: earliest
      enable-auto-commit: false
  main:
    allow-bean-definition-overriding: true
    allow-circular-references: true
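
With KAFKA_NUM_PARTITIONS: 3 and topic auto-creation enabled by default, my-topic is created the first time it is used. If you prefer to declare it explicitly, spring-kafka can create topics at startup through the auto-configured KafkaAdmin. A minimal sketch; the class name TopicConfig is illustrative:

import org.apache.kafka.clients.admin.NewTopic;
import org.springframework.context.annotation.Bean;
import org.springframework.context.annotation.Configuration;
import org.springframework.kafka.config.TopicBuilder;

@Configuration
public class TopicConfig {

    // Declares my-topic with 3 partitions and replication factor 1 (single-node broker);
    // KafkaAdmin creates it at startup if it does not already exist
    @Bean
    public NewTopic myTopic() {
        return TopicBuilder.name("my-topic")
                .partitions(3)
                .replicas(1)
                .build();
    }
}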

Test controller

@RestController
@RequestMapping(value = "kafka")
public class KafkaController {

    @Autowired
    private KafkaProducer kafkaProducer;

    @GetMapping("sendMessage")
    public Map<String, Object> sendMessage(String topic, String key, String message){
        Map<String, Object> resultMap = new HashMap<>();
        kafkaProducer.sendMessage(topic, key, message);
        resultMap.put("success", true);
        return resultMap;
    }
}

Test result

Calling GET /kafka/sendMessage?topic=my-topic&key=k1&message=ack returns {"success": true}, and the consumer logs the received record and acknowledges it.