Spring Boot Kafka Basics


Abstract: This article covers the basics of using Kafka with Spring Boot.

Basic demo

  • Project structure

(screenshot of the project layout omitted)

  • pom.xml
<parent>
    <groupId>org.springframework.boot</groupId>
    <artifactId>spring-boot-starter-parent</artifactId>
    <version>2.5.4</version>
    <relativePath/> <!-- lookup parent from repository -->
</parent>
<dependencies>
    <dependency>
        <groupId>org.springframework.boot</groupId>
        <artifactId>spring-boot-starter-web</artifactId>
    </dependency>
    <!-- Kafka support -->
    <dependency>
        <groupId>org.springframework.kafka</groupId>
        <artifactId>spring-kafka</artifactId>
    </dependency>

    <dependency>
        <groupId>org.projectlombok</groupId>
        <artifactId>lombok</artifactId>
        <optional>true</optional>
    </dependency>
    <dependency>
        <groupId>org.springframework.boot</groupId>
        <artifactId>spring-boot-starter-test</artifactId>
        <scope>test</scope>
        <exclusions>
            <exclusion>
                <groupId>org.junit.vintage</groupId>
                <artifactId>junit-vintage-engine</artifactId>
            </exclusion>
        </exclusions>
    </dependency>

    <dependency>
        <groupId>com.alibaba</groupId>
        <artifactId>fastjson</artifactId>
        <version>1.2.61</version>
    </dependency>
</dependencies>
  • application.yml
server:
  port: 8080

spring:
  application:
    name: kafka
  kafka:
    bootstrap-servers: 192.168.137.110:9092 # Kafka broker address(es)
    producer: # producer settings
      retries: 3 # values > 0 make the client re-send records whose send failed
      batch-size: 16384 # 16 KB
      buffer-memory: 33554432 # 32 MB
      acks: 1 # wait for the partition leader's acknowledgment only
      # serializers for the message key and value
      key-serializer: org.apache.kafka.common.serialization.StringSerializer
      value-serializer: org.apache.kafka.common.serialization.StringSerializer
    consumer:
      group-id: defaultGroup # consumer group id
      enable-auto-commit: false # disable automatic offset commits; we acknowledge manually
      auto-offset-reset: earliest # resume from the committed offset if one exists; otherwise consume from the beginning
      key-deserializer: org.apache.kafka.common.serialization.StringDeserializer
      value-deserializer: org.apache.kafka.common.serialization.StringDeserializer
    listener:
      # Offset commit (ack) modes:
      # RECORD: commit after each record is handled by the listener
      # BATCH: commit after each batch returned by poll() is handled
      # TIME: commit after a handled batch once more than TIME has passed since the last commit
      # COUNT: commit after a handled batch once at least COUNT records have been processed since the last commit
      # COUNT_TIME: commit as soon as either the TIME or the COUNT condition is met
      # MANUAL: commit after Acknowledgment.acknowledge() has been called for a handled batch
      # MANUAL_IMMEDIATE: commit immediately when Acknowledgment.acknowledge() is called; this is the mode typically used
      ack-mode: manual_immediate
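
The demo below sends to two topics, hello and user. If the broker is not configured to auto-create topics, they can be declared as beans that Spring Boot's auto-configured KafkaAdmin creates at startup. A minimal sketch; the KafkaTopicConfig class name and the single-partition, single-replica settings are assumptions for a one-broker dev setup:

import org.apache.kafka.clients.admin.NewTopic;
import org.springframework.context.annotation.Bean;
import org.springframework.context.annotation.Configuration;
import org.springframework.kafka.config.TopicBuilder;

@Configuration
public class KafkaTopicConfig {

    // KafkaAdmin registers NewTopic beans on the broker at startup if they do not exist yet.
    // One partition and one replica are assumptions for a single-broker demo.
    @Bean
    public NewTopic helloTopic() {
        return TopicBuilder.name("hello").partitions(1).replicas(1).build();
    }

    @Bean
    public NewTopic userTopic() {
        return TopicBuilder.name("user").partitions(1).replicas(1).build();
    }
}
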
  • Producer and consumer
// First, create a DTO to use as the message payload
import lombok.Data;

import java.io.Serializable;

@Data
public class UserDto implements Serializable {

    private static final long serialVersionUID = 5720678210505623403L;

    private Long userId;

    private String name;

}

// Producer
import com.alibaba.fastjson.JSON;
import org.springframework.beans.factory.annotation.Autowired;
import org.springframework.kafka.core.KafkaTemplate;
import org.springframework.stereotype.Component;

@Component
public class MessageProducer {

    @Autowired
    private KafkaTemplate<String, Object> kafkaTemplate;

    /**
     * Send a plain-text message to the "hello" topic
     * @param content message body
     */
    public void sendMessage(String content){
        kafkaTemplate.send("hello", content);
    }

    /**
     * Serialize the DTO to JSON and send it to the "user" topic
     * @param userDto message payload
     */
    public void sendUserMessage(UserDto userDto){
        kafkaTemplate.send("user", JSON.toJSONString(userDto));
    }

}
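
KafkaTemplate.send() is asynchronous; in this Spring Kafka version it returns a ListenableFuture, so a callback can log whether the send succeeded. A minimal sketch; sendMessageWithCallback is a hypothetical extra method for the MessageProducer class above, not part of the original demo:

    public void sendMessageWithCallback(String content) {
        // the future completes once the broker acknowledges the record (or the send fails)
        kafkaTemplate.send("hello", content).addCallback(
                result -> System.out.println("Sent to " + result.getRecordMetadata().topic()
                        + "-" + result.getRecordMetadata().partition()
                        + "@" + result.getRecordMetadata().offset()),
                ex -> System.err.println("Send failed: " + ex.getMessage()));
    }
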
// Consumer
import org.apache.kafka.clients.consumer.ConsumerRecord;
import org.springframework.kafka.annotation.KafkaListener;
import org.springframework.kafka.support.Acknowledgment;
import org.springframework.stereotype.Component;

@Component
public class MessageConsumer {

//    // Simple consumption without manual acknowledgment:
//    @KafkaListener(topics = "hello", groupId = "defaultGroup")
//    public void hello(ConsumerRecord<?, ?> record){
//        System.out.println("Consumed: " + record.topic() + "-" + record.partition() + "-" + record.value());
//    }

    /**
     * Consume and manually acknowledge the message
     * @param record consumed record
     * @param ack manual acknowledgment handle
     */
    @KafkaListener(topics = "hello", groupId = "defaultGroup")
    public void hello(ConsumerRecord<?, ?> record, Acknowledgment ack){
        System.out.println("Consumed: " + record.topic() + "-" + record.partition() + "-" + record.value());
        ack.acknowledge();
    }

//    /**
//     * A second consumer group on the same topic
//     * @param record consumed record
//     */
//    @KafkaListener(topics = "hello", groupId = "defaultGroup2")
//    public void hello2(ConsumerRecord<?, ?> record){
//        System.out.println("Consumed (group 2): " + record.topic() + "-" + record.partition() + "-" + record.value());
//    }

    @KafkaListener(topics = "user", groupId = "defaultGroup")
    public void user(ConsumerRecord<?, ?> record, Acknowledgment ack){
        System.out.println("Consumed USER: " + record.topic() + "-" + record.partition() + "-" + record.value());
        ack.acknowledge();
    }
}
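
The user listener above prints the raw JSON string produced by sendUserMessage; if the individual fields are needed, it can be parsed back with JSON.parseObject(record.value().toString(), UserDto.class), as the filter example further down does.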

// Controller
import org.springframework.beans.factory.annotation.Autowired;
import org.springframework.web.bind.annotation.GetMapping;
import org.springframework.web.bind.annotation.RequestMapping;
import org.springframework.web.bind.annotation.RestController;

@RestController
@RequestMapping(value = "message")
public class MessageController {

    @Autowired
    private MessageProducer messageProducer;

    @GetMapping(value = "hello")
    public Object hello(String content){
        messageProducer.sendMessage(content);
        return "SUCCESS";
    }

    @GetMapping(value = "user")
    public Object user(UserDto userDto){
        messageProducer.sendUserMessage(userDto);
        return "SUCCESS";
    }
}
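
With the application running on port 8080, the endpoints can be exercised from a browser or curl, e.g.:

http://localhost:8080/message/hello?content=hi
http://localhost:8080/message/user?userId=1&name=tom

Each call returns SUCCESS and the matching listener prints the consumed record to the console.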


  • Using a record filter

Note: the filter example requires automatic offset commits, i.e. enable-auto-commit: true in the consumer settings above.

import com.alibaba.fastjson.JSON;
import org.apache.kafka.clients.consumer.ConsumerRecord;
import org.springframework.context.annotation.Bean;
import org.springframework.kafka.annotation.KafkaListener;
import org.springframework.kafka.config.ConcurrentKafkaListenerContainerFactory;
import org.springframework.kafka.core.ConsumerFactory;
import org.springframework.stereotype.Component;

@Component
public class MessageFilterConsumer {

    /**
     * Listener container factory with a record filter
     * @param consumerFactory the auto-configured consumer factory
     * @return a factory whose listeners only see records that pass the filter
     */
    @Bean
    public ConcurrentKafkaListenerContainerFactory filterContainerFactory(ConsumerFactory consumerFactory){
        ConcurrentKafkaListenerContainerFactory factory = new ConcurrentKafkaListenerContainerFactory();
        factory.setConsumerFactory(consumerFactory);
        // acknowledge discarded records so their offsets are still committed
        factory.setAckDiscarded(true);
        // returning true discards the record; returning false keeps it
        factory.setRecordFilterStrategy(consumerRecord -> {
            if ("user".equals(consumerRecord.topic())) {
                UserDto userDto = JSON.parseObject(consumerRecord.value().toString(), UserDto.class);
                if (userDto.getUserId() % 2 == 0) {
                    return false; // keep users with an even userId
                }
            }
            return true; // discard everything else
        });
        return factory;
    }

    @KafkaListener(topics = "user", groupId = "filterGroup", containerFactory = "filterContainerFactory")
    public void user(ConsumerRecord<?, ?> record){
        System.out.println("Filtered USER: " + record.topic() + "-" + record.partition() + "-" + record.value());
    }
}
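
Note the return convention of RecordFilterStrategy: returning true means "discard", so the filterGroup listener above only receives user messages with an even userId.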

Run results

(console output screenshot omitted)

Notes