Integrating Kafka with Spring Boot

References:

PS: The referenced article is already fairly clear; the notes below are supplementary.

Step 1: Add the dependencies

<!-- kafka -->
<dependency>
    <groupId>org.springframework.kafka</groupId>
    <artifactId>spring-kafka</artifactId>
</dependency>


<!-- Resolves the @ConfigurationProperties metadata issue in Spring Boot 1.5+ -->
<dependency>
    <groupId>org.springframework.boot</groupId>
    <artifactId>spring-boot-configuration-processor</artifactId>
    <optional>true</optional>
</dependency>

In the application main class, add the following annotations (@EnableScheduling drives the @Scheduled test task in Step 6, and @EnableConfigurationProperties enables the TopicConfig binding below):

@EnableScheduling
@EnableConfigurationProperties
@SpringBootApplication
public class SpringbootHelloApplication {

    public static void main(String[] args) {
        SpringApplication.run(SpringbootHelloApplication.class, args);
    }
}

Step 2: Configure Kafka

Add the following settings to application.properties:

spring.application.name=springboot_hello
server.port=9001

# Kafka configuration: start
# Broker address
spring.kafka.bootstrap-servers=192.168.31.11:9093

# Producer configuration
# Serialize outgoing message values as JSON
spring.kafka.producer.value-serializer=org.springframework.kafka.support.serializer.JsonSerializer

# acks controls how many acknowledgments the producer requires the leader to have received
# before considering a request complete, i.e. the durability of a record on the server side:
# acks=0   The producer does not wait for any acknowledgment from the server; the record is
#          added to the socket buffer and considered sent immediately. There is no guarantee
#          the server received it, and the retries setting has no effect (the client generally
#          won't learn of failures). The offset returned for each record is always -1.
# acks=1   The leader writes the record to its local log and responds without waiting for all
#          replicas to acknowledge. If the leader fails right after acknowledging but before
#          the followers have replicated the record, the record is lost.
# acks=all The leader waits for the full set of in-sync replicas to acknowledge the record.
#          The record is not lost as long as at least one in-sync replica stays alive. This is
#          the strongest guarantee and is equivalent to acks=-1.
# Allowed values: all, -1, 0, 1
spring.kafka.producer.acks=1

# Consumer configuration
# Where to start reading when there is no committed offset; earliest = oldest available message
spring.kafka.consumer.auto-offset-reset=earliest
# Commit offsets manually
spring.kafka.consumer.enable-auto-commit=false
spring.kafka.listener.ack-mode=manual

# Topic, partition, and replica configuration
kafka.topics[0].name=topic1
kafka.topics[0].num-partitions=3
# Number of replicas per partition, including the leader; 1 means a leader with no followers
kafka.topics[0].replication-factor=1

kafka.topics[1].name=topic2
kafka.topics[1].num-partitions=1
kafka.topics[1].replication-factor=1


kafka.topics[2].name=topic3
kafka.topics[2].num-partitions=2
kafka.topics[2].replication-factor=1
# Kafka configuration: end

Create a properties class that captures the topic settings:

@Data
@Component
@ConfigurationProperties(prefix = "kafka")
public class TopicConfig {
    private List<Topic> topics;

    /**
     * A static nested class; effectively a top-level class that just
     * happens to be declared inside another class.
     */
    @Data
    public static class Topic {
        String name;
        Integer numPartitions = 3;
        Short replicationFactor = 1;

        public NewTopic toNewTopic() {
            return new NewTopic(this.name, this.numPartitions, this.replicationFactor);
        }
    }
}
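
Spring's relaxed binding maps the indexed keys from Step 2 (kafka.topics[0].name, kafka.topics[0].num-partitions, ...) onto this list, with num-partitions binding to numPartitions and replication-factor to replicationFactor. If you want to verify the binding, a quick check could look like this (a hypothetical test class, assuming spring-boot-starter-test is on the classpath):

import static org.assertj.core.api.Assertions.assertThat;

import org.junit.jupiter.api.Test;
import org.springframework.beans.factory.annotation.Autowired;
import org.springframework.boot.test.context.SpringBootTest;

// Hypothetical sanity check that the indexed properties bound as expected.
@SpringBootTest
class TopicConfigTest {

    @Autowired
    TopicConfig topicConfig;

    @Test
    void bindsTopicsFromProperties() {
        assertThat(topicConfig.getTopics()).hasSize(3);
        assertThat(topicConfig.getTopics().get(0).getName()).isEqualTo("topic1");
        assertThat(topicConfig.getTopics().get(0).getNumPartitions()).isEqualTo(3);
    }
}

Next, the Kafka configuration class registers these topics as NewTopic beans and adds a JSON message converter: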


@Slf4j
@Configuration
public class KafkaConfig {

    private final TopicConfig configurations;
    private final GenericWebApplicationContext context;

    public KafkaConfig(TopicConfig configurations, GenericWebApplicationContext genericContext) {
        this.configurations = configurations;
        this.context = genericContext;
    }


    /**
     * JSON message converter.
     *
     * @return a converter that maps JSON string payloads to listener method parameter types
     */
    @Bean
    public RecordMessageConverter jsonConverter() {
        return new StringJsonMessageConverter();
    }

    /**
     * Manually register a NewTopic bean for each configured topic.
     * A @PostConstruct method runs once the bean has been constructed and its dependencies injected.
     */
    @PostConstruct
    public void init() {
        configurations.getTopics().forEach(item -> context.registerBean(item.name, NewTopic.class, item::toNewTopic));
    }
}
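
Registering each NewTopic bean through GenericWebApplicationContext works, but if you are on Spring Kafka 2.7 or later, a simpler alternative (a sketch, not part of the original setup) is to declare all topics in one bean with KafkaAdmin.NewTopics:

import org.apache.kafka.clients.admin.NewTopic;
import org.springframework.context.annotation.Bean;
import org.springframework.context.annotation.Configuration;
import org.springframework.kafka.core.KafkaAdmin;

// Alternative topic registration; requires Spring Kafka 2.7+.
@Configuration
public class TopicsAlternativeConfig {

    // KafkaAdmin (auto-configured by Spring Boot) picks up this bean
    // and creates any topics that don't exist yet on startup.
    @Bean
    public KafkaAdmin.NewTopics allTopics(TopicConfig configurations) {
        return new KafkaAdmin.NewTopics(
                configurations.getTopics().stream()
                        .map(TopicConfig.Topic::toNewTopic)
                        .toArray(NewTopic[]::new));
    }
}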

Step 3: The message entity

Define a simple entity class to use as the message payload.

@Data
@NoArgsConstructor
@AllArgsConstructor
public class Book implements Serializable {
    private Long id;
    private String name;
}
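
The JsonSerializer configured in Step 2 uses Jackson under the hood, so the bytes written to Kafka are plain JSON. A quick way to see the wire format (a standalone sketch, assuming default Jackson settings):

import com.fasterxml.jackson.databind.ObjectMapper;

public class BookJsonDemo {
    public static void main(String[] args) throws Exception {
        // Same serialization the producer applies to the Book payload:
        String json = new ObjectMapper().writeValueAsString(new Book(1L, "Alice's Adventures"));
        System.out.println(json); // {"id":1,"name":"Alice's Adventures"}
    }
}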

Step 4: Producer

For this part, you may want to read the referenced article first and then come back here. Spring provides KafkaTemplate for sending messages.

@Slf4j
@Service
public class ProducerService {

    private final KafkaTemplate<String, Object> kafkaTemplate;

    public ProducerService(KafkaTemplate<String, Object> kafkaTemplate) {
        this.kafkaTemplate = kafkaTemplate;
    }

    /**
     * Send a message synchronously.
     *
     * @param topic the target topic
     * @param obj   the message payload
     * @return the send result, including record metadata
     */
    @SneakyThrows
    public SendResult<String, Object> sendMessageSync(String topic, Object obj) {
        ProducerRecord<String, Object> producerRecord = new ProducerRecord<>(topic, obj);
        return this.sendMessageSync(producerRecord);
    }

    /**
     * Send a message synchronously.
     *
     * @param producerRecord the record to send
     * @return the send result, including record metadata
     */
    @SneakyThrows
    public SendResult<String, Object> sendMessageSync(ProducerRecord<String, Object> producerRecord) {
        String topic = producerRecord.topic();
        Object obj = producerRecord.value();
        log.info(" --- >> Producer sending message: topic--{}, message--{}", topic, JSONObject.toJSONString(obj));
        // Send the record itself so any key/partition/headers set on it are preserved
        SendResult<String, Object> sendResult = kafkaTemplate.send(producerRecord).get();
        RecordMetadata metadata = sendResult.getRecordMetadata();
        log.info(" --- >> Producer received send result: topic--{}, message--{}, partition--{}, offset--{}",
                topic, JSONObject.toJSONString(obj), metadata.partition(), metadata.offset());
        return sendResult;
    }

    /**
     * Send a message asynchronously with a default logging callback.
     *
     * @param topic the target topic
     * @param obj   the message payload
     */
    public void sendMessageAsync(String topic, Object obj) {
        ListenableFutureCallback<SendResult<String, Object>> callback =
                new ListenableFutureCallback<SendResult<String, Object>>() {
            @Override
            public void onFailure(Throwable throwable) {
                log.error(" --- >> Producer send failed", throwable);
            }

            @Override
            public void onSuccess(SendResult<String, Object> sendResult) {
                RecordMetadata metadata = sendResult.getRecordMetadata();
                log.info(" --- >> Producer received send result: topic--{}, message--{}, partition--{}, offset--{}",
                        topic, JSONObject.toJSONString(obj), metadata.partition(), metadata.offset());
            }
        };
        this.sendMessageAsync(topic, obj, callback);
    }

    /**
     * Send a message asynchronously.
     *
     * @param topic          the target topic
     * @param obj            the message payload
     * @param futureCallback callback invoked when the send completes or fails
     */
    public void sendMessageAsync(String topic, Object obj,
                                 ListenableFutureCallback<SendResult<String, Object>> futureCallback) {
        log.info(" --- >> Producer sending message: topic--{}, message--{}", topic, JSONObject.toJSONString(obj));
        ListenableFuture<SendResult<String, Object>> future = kafkaTemplate.send(topic, obj);
        future.addCallback(futureCallback);
    }
}
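
Note that ListenableFuture matches Spring Kafka 2.x. From Spring Kafka 3.0 onward, KafkaTemplate.send() returns a CompletableFuture instead, so the async variant would look roughly like this (a sketch under that version assumption):

// Spring Kafka 3.0+: send() returns CompletableFuture<SendResult<K, V>>.
public void sendMessageAsync3x(String topic, Object obj) {
    kafkaTemplate.send(topic, obj).whenComplete((sendResult, throwable) -> {
        if (throwable != null) {
            log.error(" --- >> Producer send failed", throwable);
        } else {
            RecordMetadata metadata = sendResult.getRecordMetadata();
            log.info(" --- >> Producer received send result: topic--{}, partition--{}, offset--{}",
                    topic, metadata.partition(), metadata.offset());
        }
    });
}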

Step 5: Consumer

Annotating a method with @KafkaListener makes it a message listener: when messages arrive, the listener container polls them and invokes the method.

@Slf4j
@Service
public class ConsumerService {
    @Value("${kafka.topics[0].name}")
    String myTopic;

    @Value("${kafka.topics[1].name}")
    String myTopic2;

    private final ObjectMapper objectMapper = new ObjectMapper();

    /**
     * Annotated with @KafkaListener, this method is invoked for every record polled
     * from the topic. Here the raw JSON string is deserialized manually with Jackson.
     *
     * @param record         the consumed record
     * @param acknowledgment used to commit the offset manually (ack-mode=manual)
     */
    @SneakyThrows
    @KafkaListener(topics = {"${kafka.topics[0].name}"}, groupId = "group1")
    public void consumeMessage(ConsumerRecord<String, String> record, Acknowledgment acknowledgment) {
        Book book = objectMapper.readValue(record.value(), Book.class);
        log.info(" --- >> Consumer read message: topic--{}, partition--{}, message--{}", myTopic, record.partition(), book.toString());
        acknowledgment.acknowledge();
    }

    /**
     * Receives the payload as a Book directly; the StringJsonMessageConverter bean
     * registered in KafkaConfig converts the JSON payload to the parameter type.
     */
    @SneakyThrows
    @KafkaListener(topics = {"${kafka.topics[1].name}"}, groupId = "group2")
    public void consumeMessage2(Book book, Acknowledgment acknowledgment) {
        log.info(" --- >> Consumer read message: topic--{}, message--{}", myTopic2, book.toString());
        acknowledgment.acknowledge();
    }
}
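
If a listener needs both the converted payload and record metadata, @Header parameters can be combined with the converted type. A sketch for the third topic (hypothetical listener method and group id, using the Spring Kafka 2.x header constant):

import org.springframework.kafka.support.KafkaHeaders;
import org.springframework.messaging.handler.annotation.Header;

// Hypothetical listener on the third topic: converted payload plus partition metadata.
@KafkaListener(topics = {"${kafka.topics[2].name}"}, groupId = "group3")
public void consumeMessage3(Book book,
                            @Header(KafkaHeaders.RECEIVED_PARTITION_ID) int partition,
                            Acknowledgment acknowledgment) {
    log.info(" --- >> Consumer read message: partition--{}, message--{}", partition, book);
    acknowledgment.acknowledge();
}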

Step 6: Test

A scheduled task sends a Book to each of the first two topics every ten seconds:

@Component
public class KafkaTask {

    @Value("${kafka.topics[0].name}")
    String myTopic;

    @Value("${kafka.topics[1].name}")
    String myTopic2;

    @Autowired
    private ProducerService producerService;

    private AtomicLong atomicLong = new AtomicLong();

    @Scheduled(cron = "0/10 * * * * ?")
    public void testKafka() {
        this.producerService.sendMessageAsync(myTopic, new Book(atomicLong.addAndGet(1), "Alice's Adventures " + atomicLong.get()));
        this.producerService.sendMessageAsync(myTopic2, new Book(atomicLong.addAndGet(1), "Crazy Stone " + atomicLong.get()));
    }
}
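
If you would rather not wait for the scheduler, a one-shot check at startup works too (a minimal sketch; KafkaSmokeTest is a hypothetical class name):

import org.springframework.beans.factory.annotation.Value;
import org.springframework.boot.CommandLineRunner;
import org.springframework.stereotype.Component;

// Hypothetical one-shot test: sends a single message synchronously at startup.
@Component
public class KafkaSmokeTest implements CommandLineRunner {

    @Value("${kafka.topics[0].name}")
    String myTopic;

    private final ProducerService producerService;

    public KafkaSmokeTest(ProducerService producerService) {
        this.producerService = producerService;
    }

    @Override
    public void run(String... args) {
        producerService.sendMessageSync(myTopic, new Book(0L, "smoke test"));
    }
}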
