Integrating Kafka with Spring Boot 1.4.2

The company asked me to build a report-statistics feature on top of Kafka. I expected it to be straightforward, but the project runs on Spring Boot 1.4.2, which does not support configuring Kafka directly in application.yml, so things got a bit more involved. Without further ado, here is the code.
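
The broker address itself still comes from application.yml via the `@Value` annotations below; only the rest of the Kafka settings have to be built up in Java config. A minimal sketch of that one property (the address is a placeholder):

```yaml
spring:
  kafka:
    bootstrap-servers: 127.0.0.1:9092  # placeholder; point this at your Kafka broker(s)
```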

```java
import java.util.HashMap;
import java.util.Map;

import org.apache.kafka.clients.producer.ProducerConfig;
import org.apache.kafka.common.serialization.StringSerializer;
import org.springframework.beans.factory.annotation.Value;
import org.springframework.context.annotation.Bean;
import org.springframework.context.annotation.Configuration;
import org.springframework.kafka.annotation.EnableKafka;
import org.springframework.kafka.core.DefaultKafkaProducerFactory;
import org.springframework.kafka.core.KafkaTemplate;
import org.springframework.kafka.core.ProducerFactory;

/**
 * @Description Producer configuration
 * @Author Jieln
 * @Date 2022/11/1
 **/
@Configuration
@EnableKafka
public class KafkaConfig {

    @Value("${spring.kafka.bootstrap-servers}")
    private String servers;

    public Map<String, Object> producerConfigs() {
        Map<String, Object> props = new HashMap<>();
        props.put(ProducerConfig.BOOTSTRAP_SERVERS_CONFIG, servers);
        // acks=1: the partition leader acknowledges the write without waiting for replicas
        props.put(ProducerConfig.ACKS_CONFIG, "1");
        props.put(ProducerConfig.VALUE_SERIALIZER_CLASS_CONFIG, StringSerializer.class);
        props.put(ProducerConfig.KEY_SERIALIZER_CLASS_CONFIG, StringSerializer.class);
        return props;
    }

    public ProducerFactory<String, String> producerFactory() {
        return new DefaultKafkaProducerFactory<>(producerConfigs());
    }

    @Bean
    public KafkaTemplate<String, String> kafkaTemplate() {
        return new KafkaTemplate<>(producerFactory());
    }
}
```

```java
import java.util.HashMap;
import java.util.Map;

import org.apache.kafka.clients.consumer.ConsumerConfig;
import org.springframework.beans.factory.annotation.Value;
import org.springframework.context.annotation.Bean;
import org.springframework.context.annotation.Configuration;
import org.springframework.kafka.annotation.EnableKafka;
import org.springframework.kafka.config.ConcurrentKafkaListenerContainerFactory;
import org.springframework.kafka.core.ConsumerFactory;
import org.springframework.kafka.core.DefaultKafkaConsumerFactory;

/**
 * Created by chenyaxin
 * on 2022/11/15 14:37
 * Consumer configuration
 */
@Configuration
@EnableKafka
public class KafkaConsumerConfig {
    @Value("${spring.kafka.bootstrap-servers}")
    private String bootstrapServers;
    // Default consumer group id; the listener below joins this group
    private String groupId = "test_topic"; // value used for local testing
    // Strategy when a partition has no committed offset (or the offset is invalid):
    // "earliest" starts from the beginning, "latest" only reads new messages
    private String autoOffsetReset = "earliest";
    // Whether offsets are committed automatically; if false, auto-commit-interval is ignored
    private boolean enableAutoCommit = true;
    // Auto-commit interval in milliseconds (only relevant when enable-auto-commit is true)
    private String autoCommitInterval = "1000";
    // Deserializers for the message key and value
    private String keyDeserializerClass = "org.apache.kafka.common.serialization.StringDeserializer";
    private String valueDeserializerClass = "org.apache.kafka.common.serialization.StringDeserializer";
    // Maximum number of records returned by a single poll (the batch size when batch consumption is enabled)
    private String maxPollRecords = "50";

    @Bean
    public ConcurrentKafkaListenerContainerFactory<String, String> kafkaListenerContainerFactory() {
        ConcurrentKafkaListenerContainerFactory<String, String> factory =
                new ConcurrentKafkaListenerContainerFactory<>();
        factory.setConsumerFactory(consumerFactory());
        // Set this to true for batch consumption (batch size is ConsumerConfig.MAX_POLL_RECORDS_CONFIG);
        // when true, the @KafkaListener method must accept a list of records or it will fail at runtime.
        // A sketch of such a batch listener follows this class.
        factory.setBatchListener(false);
        return factory;
    }

    @Bean
    public ConsumerFactory<String, String> consumerFactory() {
        return new DefaultKafkaConsumerFactory<>(consumerConfigs());
    }

    @Bean
    public Map<String, Object> consumerConfigs() {
        Map<String, Object> props = new HashMap<>();
        props.put(ConsumerConfig.BOOTSTRAP_SERVERS_CONFIG, bootstrapServers);
        props.put(ConsumerConfig.GROUP_ID_CONFIG, groupId);
        props.put(ConsumerConfig.AUTO_OFFSET_RESET_CONFIG, autoOffsetReset);
        props.put(ConsumerConfig.ENABLE_AUTO_COMMIT_CONFIG, enableAutoCommit);
        props.put(ConsumerConfig.AUTO_COMMIT_INTERVAL_MS_CONFIG, autoCommitInterval);
        props.put(ConsumerConfig.KEY_DESERIALIZER_CLASS_CONFIG, keyDeserializerClass);
        props.put(ConsumerConfig.VALUE_DESERIALIZER_CLASS_CONFIG, valueDeserializerClass);
        props.put(ConsumerConfig.MAX_POLL_RECORDS_CONFIG, maxPollRecords);

        return props;
    }
}
```
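
For reference, if `setBatchListener(true)` is enabled in the factory above, the `@KafkaListener` method has to accept a whole batch instead of a single record. A minimal sketch of that signature, assuming it sits in a Spring bean like the listener shown further below (the topic name is reused from there):

```java
// Hypothetical batch variant: with setBatchListener(true), each invocation receives
// up to max.poll.records records (50 in the config above).
@KafkaListener(topics = "chargeOrderReport")
public void batchListener(List<ConsumerRecord<String, String>> records) {
    for (ConsumerRecord<String, String> record : records) {
        // handle each record, e.g. record.value()
    }
}
```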

Sending a message:

```java
// These @Autowired fields and send() live inside a Spring-managed bean;
// `logger` is assumed to be that class's SLF4J logger.
@Autowired
KafkaTemplate<String, String> kafkaTemplate;
@Autowired
private ChargeOrderReportService chargeOrderReportService;
@Autowired
private PowerStationChargeStatisReportTbService powerStationChargeStatisReportTbService;

public void send(String topic, String message) {
    try {
        ListenableFuture<SendResult<String, String>> future = kafkaTemplate.send(topic, message);
        future.addCallback(new ListenableFutureCallback<SendResult<String, String>>() {
            @Override
            public void onFailure(Throwable ex) {
                logger.error("Kafka send failed", ex);
            }

            @Override
            public void onSuccess(SendResult<String, String> result) {
                logger.info("Kafka send succeeded, message: {}, topic: {}, key: {}",
                        result.getProducerRecord().value(),
                        result.getProducerRecord().topic(),
                        result.getProducerRecord().key());
            }
        });
    } catch (Exception ex) {
        logger.error("Exception while sending to Kafka", ex);
    }
}
```
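
If the caller needs confirmation before moving on, the returned `ListenableFuture` can also be blocked on instead of registering a callback. A minimal sketch (checked exceptions from `get()` are left to the caller):

```java
// Hypothetical synchronous variant: get() blocks until the broker acknowledges the
// record (acks=1 in the producer config) and throws ExecutionException on failure.
SendResult<String, String> result = kafkaTemplate.send(topic, message).get();
logger.info("Kafka send acknowledged, offset: {}", result.getRecordMetadata().offset());
```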

Consuming the message:

```java
@KafkaListener(topics = "chargeOrderReport")
public void topicListener1(ConsumerRecord<String, String> record) {
    try {
        String value = record.value();
        logger.info("Kafka message received, value: {}", value);
        chargeOrderReportService.handleMesFromKafka(value);
    } catch (Exception e) {
        logger.error("Failed to process the Kafka message", e);
    }
}
```