How to consume a specified Kafka topic on a schedule


Spring Boot plus spring-kafka makes it easy to listen to a topic: slap @KafkaListener on a method and, as soon as the application starts, it consumes that topic's messages in real time. But what if you don't want consumption to begin at startup? What if you want to consume a given topic on a schedule, or even start and stop consumption whenever you like? This post shows how.

Solution: talk is cheap, show me the code

Add the dependency

<!--kafka-->
<dependency>
   <groupId>org.springframework.kafka</groupId>
   <artifactId>spring-kafka</artifactId>
</dependency>
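
No version is specified here: if the project inherits from spring-boot-starter-parent (or imports the Spring Boot BOM), Boot's dependency management supplies a compatible spring-kafka version.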

Consumer properties

kafka.consumer.servers=0.0.0.0:9092
kafka.consumer.enable.auto.commit=false
kafka.consumer.auto.commit.interval=50
kafka.consumer.session.timeout=30000
kafka.consumer.request.timeout=40000
kafka.consumer.heartbeat.interval.ms=3000
# must exceed the worst-case time to process one batch of records,
# otherwise the consumer is evicted from the group between polls
kafka.consumer.max.poll.interval.ms=300000
kafka.consumer.max.poll.records=50
kafka.consumer.poll.timeout=3000
kafka.consumer.auto.offset.reset=latest
kafka.consumer.concurrency=3
kafka.consumer.max.partition.fetch.bytes=10485760
kafka.consumer.autoStart=false
kafka.consumer.topic=topic-test
kafka.consumer.group-id=aissue-test
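
These go in src/main/resources/kafka.properties, which the configuration class below loads via @PropertySource. Note that the kafka.consumer.* prefix is just a naming convention read through @Value: most keys map onto native Kafka client settings, but autoStart, poll.timeout, concurrency, topic and group-id configure the Spring listener container rather than the client. kafka.consumer.autoStart=false is the heart of the trick: it stops the listener container from starting together with the application.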

The Kafka consumer configuration class

package com.aissue.config.kafka;

import org.apache.kafka.clients.consumer.ConsumerConfig;
import org.apache.kafka.common.serialization.StringDeserializer;
import org.springframework.beans.factory.annotation.Value;
import org.springframework.context.annotation.Bean;
import org.springframework.context.annotation.Configuration;
import org.springframework.context.annotation.PropertySource;
import org.springframework.kafka.annotation.EnableKafka;
import org.springframework.kafka.config.ConcurrentKafkaListenerContainerFactory;
import org.springframework.kafka.config.KafkaListenerContainerFactory;
import org.springframework.kafka.core.ConsumerFactory;
import org.springframework.kafka.core.DefaultKafkaConsumerFactory;
import org.springframework.kafka.listener.ConcurrentMessageListenerContainer;
import org.springframework.kafka.listener.ContainerProperties;

import java.util.HashMap;
import java.util.Map;

@Configuration
@EnableKafka
@PropertySource("classpath:kafka.properties")
public class KafkaConsumerConfig {
    @Value("${kafka.consumer.servers}")
    private String servers;
    @Value("${kafka.consumer.enable.auto.commit}")
    private boolean enableAutoCommit;
    @Value("${kafka.consumer.session.timeout}")
    private int sessionTimeout;
    @Value("${kafka.consumer.request.timeout}")
    private int requestTimeout;
    @Value("${kafka.consumer.auto.commit.interval}")
    private String autoCommitInterval;
    @Value("${kafka.consumer.auto.offset.reset}")
    private String autoOffsetReset;
    @Value("${kafka.consumer.concurrency}")
    private int concurrency;
    @Value("${kafka.consumer.max.poll.records}")
    private int maxPollRecords;
    @Value("${kafka.consumer.poll.timeout}")
    private int pollTimeout;
    @Value("${kafka.consumer.max.partition.fetch.bytes}")
    private int maxPartitionFetchBytes;
    @Value("${kafka.consumer.heartbeat.interval.ms}")
    private int heartbeatInterval;
    @Value("${kafka.consumer.max.poll.interval.ms}")
    private int maxPollInterval;
    @Value("${kafka.consumer.autoStart}")
    private boolean autoStart;

    @Bean
    public KafkaListenerContainerFactory<ConcurrentMessageListenerContainer<String, String>> kafkaListenerContainerFactory() {
        ConcurrentKafkaListenerContainerFactory<String, String> factory =
                new ConcurrentKafkaListenerContainerFactory<>();
        factory.setConsumerFactory(consumerFactory());
        factory.setConcurrency(concurrency);
        factory.setBatchListener(true);
        factory.getContainerProperties().setPollTimeout(pollTimeout);
        factory.getContainerProperties().setAckMode(ContainerProperties.AckMode.MANUAL_IMMEDIATE);
        factory.setAutoStartup(autoStart);
        return factory;
    }

    public ConsumerFactory<String, String> consumerFactory() {
        return new DefaultKafkaConsumerFactory<>(consumerConfigs());
    }

    public Map<String, Object> consumerConfigs() {
        Map<String, Object> propsMap = new HashMap<>();
        propsMap.put(ConsumerConfig.BOOTSTRAP_SERVERS_CONFIG, servers);
        propsMap.put(ConsumerConfig.ENABLE_AUTO_COMMIT_CONFIG, enableAutoCommit);
        propsMap.put(ConsumerConfig.AUTO_COMMIT_INTERVAL_MS_CONFIG, autoCommitInterval);
        propsMap.put(ConsumerConfig.SESSION_TIMEOUT_MS_CONFIG, sessionTimeout);

        // deserialize keys and values as strings
        propsMap.put(ConsumerConfig.KEY_DESERIALIZER_CLASS_CONFIG, StringDeserializer.class);
        propsMap.put(ConsumerConfig.VALUE_DESERIALIZER_CLASS_CONFIG, StringDeserializer.class);
        propsMap.put(ConsumerConfig.MAX_POLL_RECORDS_CONFIG, maxPollRecords); // max records per poll (batch size)
        propsMap.put(ConsumerConfig.AUTO_OFFSET_RESET_CONFIG, autoOffsetReset);
        propsMap.put(ConsumerConfig.MAX_PARTITION_FETCH_BYTES_CONFIG, maxPartitionFetchBytes);
        // these two settings keep a slow consumer from being evicted from the group (expired heartbeat / rebalance)
        propsMap.put(ConsumerConfig.HEARTBEAT_INTERVAL_MS_CONFIG, heartbeatInterval);
        propsMap.put(ConsumerConfig.MAX_POLL_INTERVAL_MS_CONFIG, maxPollInterval);
        propsMap.put(ConsumerConfig.REQUEST_TIMEOUT_MS_CONFIG, requestTimeout);
        return propsMap;
    }
}
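
Two details in this factory matter for the rest of the article: the ack mode is MANUAL_IMMEDIATE, matching the manual Acknowledgment in the listener below, and setAutoStartup(autoStart) applies to every container built from this factory, so with kafka.consumer.autoStart=false the container is created and registered at startup but stays dormant until something explicitly starts it.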

With the groundwork done, the dependency added and the configuration class in place, it's time for the moment of magic...

package com.aissue.consumer;

import lombok.extern.slf4j.Slf4j;
import org.apache.kafka.clients.consumer.ConsumerRecord;
import org.springframework.beans.factory.annotation.Autowired;
import org.springframework.kafka.annotation.KafkaListener;
import org.springframework.kafka.config.KafkaListenerEndpointRegistry;
import org.springframework.kafka.listener.MessageListenerContainer;
import org.springframework.kafka.support.Acknowledgment;
import org.springframework.scheduling.annotation.EnableScheduling;
import org.springframework.scheduling.annotation.Scheduled;
import org.springframework.stereotype.Component;

import java.util.List;
import java.util.Optional;

/**
 * Consumes the configured Kafka topic on a schedule.
 * @author aissue
 */
@Component
@Slf4j
@EnableScheduling
public class AisConsumer {
    // unique id of the @KafkaListener, used to look its container up in the registry
    private static final String TOPIC_ID = "AisListener";
    @Autowired
    private KafkaListenerEndpointRegistry registry;

    /**
     * Batch-consumes the configured topic. Because the container factory was
     * built with autoStartup=false, this listener does not start with the
     * application; it only runs once start() below activates its container.
     */
    @KafkaListener(id = TOPIC_ID, topics = "${kafka.consumer.topic}", groupId = "${kafka.consumer.group-id}", containerFactory = "kafkaListenerContainerFactory")
    public void getImg5DbData(List<ConsumerRecord<String, String>> records, Acknowledgment ack) {
        if (records == null || records.isEmpty()) {
            return;
        }
        for (ConsumerRecord<String, String> record : records) {
            Optional<String> kafkaMessage = Optional.ofNullable(record.value());
            if (kafkaMessage.isPresent()) {
                log.info("kafka message: {}", kafkaMessage.get());
            }
        }
        // commit the whole batch manually (AckMode.MANUAL_IMMEDIATE)
        ack.acknowledge();
    }

    /**
     * Runs 3000 ms after the application starts, then every 12 hours:
     * starts the listener container if it is not running and resumes it
     * in case it was paused.
     */
    @Scheduled(initialDelay = 3000, fixedDelay = 12 * 60 * 60 * 1000)
    public void start() {
        MessageListenerContainer container = registry.getListenerContainer(TOPIC_ID);
        if (container == null) {
            log.warn("no listener container registered under id [{}]", TOPIC_ID);
            return;
        }
        log.info("starting kafka listener [{}]", TOPIC_ID);
        if (!container.isRunning()) {
            container.start();
        }
        // resume delivery in case the container was paused
        container.resume();
    }

}
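
This only wires up the start side: every 12 hours the container is (re)started or resumed. To make consumption truly windowed, starting on a schedule and also stopping on one, a counterpart method can pause the container. Below is a minimal sketch that would live in the same class; the cron expression and the choice of pause() over stop() are illustrative assumptions, not part of the original setup.

    /**
     * Hypothetical counterpart to start(): pauses delivery when the
     * consumption window closes (illustratively, at 02:00 every day).
     */
    @Scheduled(cron = "0 0 2 * * ?")
    public void pause() {
        MessageListenerContainer container = registry.getListenerContainer(TOPIC_ID);
        if (container != null && container.isRunning()) {
            // pause() keeps the consumer in the group but halts record delivery;
            // container.stop() would instead shut the container down completely
            container.pause();
        }
    }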

Summary

That's the whole solution for consuming a specified Kafka topic on a schedule. The write-up is short, but it covers all the essentials; I hope it helps.