Kafka多维度系统精讲-从入门到熟练掌握 Consumer手动提交offset 详细代码笔记

123 阅读2分钟

1、Kafka的Consumer手动提交offset

/**
 * 手动提交offset
 */
private
static void commitedOffset() {
    Properties
props = new Properties();
    props.setProperty("bootstrap.servers", "localhost:9092");
    props.setProperty("group.id", "test");
    props.setProperty("enable.auto.commit", "false");
    props.setProperty("auto.commit.interval.ms", "1000");
    props.setProperty("key.deserializer", "org.apache.kafka.common.serialization.StringDeserializer");
    props.setProperty("value.deserializer", "org.apache.kafka.common.serialization.StringDeserializer");

    KafkaConsumer<String, String> consumer = new KafkaConsumer(props);
    // 消费订阅哪一个Topic或者几个Topic
    consumer.subscribe(Arrays.asList(TOPIC_NAME));
    while (true) {
        ConsumerRecords<String, String> records =
consumer.poll(Duration.ofMillis(10000));
        for (ConsumerRecord<String, String> record :
records) {
            // 想把数据保存到数据库,成功就成功,不成功...
            // TODO record 2 db
            System.out.printf("patition = %d , offset = %d, key = %s,
value = %s%n",
                    record.partition(), record.offset(), record.key(), record.value());
            // 如果失败,则回滚, 不要提交offset
        }

        // 如果成功,手动通知offset提交
        consumer.commitAsync();
    }
}



2、Kafka的Consumer手动提交offset,并且手动控制partition

/**
 * Consumes records with manual offset management, committing per partition:
 * each partition's slice of the polled batch is processed and then committed
 * synchronously via {@code commitSync(Map)} with that partition's next offset.
 *
 * <p>Runs forever; intended as a runnable demo, not production code.
 */
private static void commitedOffsetWithPartition() {
    Properties props = new Properties();
    props.setProperty("bootstrap.servers", "localhost:9092");
    props.setProperty("group.id", "test");
    // Disable auto-commit so offsets are stored only after processing succeeds.
    // (auto.commit.interval.ms is a no-op with auto-commit disabled, so omitted.)
    props.setProperty("enable.auto.commit", "false");
    props.setProperty("key.deserializer", "org.apache.kafka.common.serialization.StringDeserializer");
    props.setProperty("value.deserializer", "org.apache.kafka.common.serialization.StringDeserializer");

    // Diamond operator instead of the raw KafkaConsumer type.
    KafkaConsumer<String, String> consumer = new KafkaConsumer<>(props);
    // Subscribe to one (or several) topics.
    consumer.subscribe(Arrays.asList(TOPIC_NAME));
    while (true) {
        ConsumerRecords<String, String> records = consumer.poll(Duration.ofMillis(10000));
        // Handle each partition's records independently.
        for (TopicPartition partition : records.partitions()) {
            List<ConsumerRecord<String, String>> pRecord = records.records(partition);
            for (ConsumerRecord<String, String> record : pRecord) {
                System.out.printf("partition = %d , offset = %d, key = %s, value = %s%n",
                        record.partition(), record.offset(), record.key(), record.value());
            }
            // Committed offset is the NEXT record to consume, hence lastOffset + 1.
            long lastOffset = pRecord.get(pRecord.size() - 1).offset();
            Map<TopicPartition, OffsetAndMetadata> offset = new HashMap<>();
            offset.put(partition, new OffsetAndMetadata(lastOffset + 1));
            // Synchronously commit this single partition's offset.
            consumer.commitSync(offset);
            System.out.println("=============partition - " + partition + "
end================");
        }
    }
}



3、Kafka的Consumer手动指定offset的起始位置,及手动提交offset

/**
 * Demonstrates manually choosing the starting offset with {@code seek()} on an
 * explicitly assigned partition, then committing processed offsets per
 * partition with {@code commitSync(Map)}.
 *
 * <p>WARNING: the hard-coded {@code seek(p0, 700)} sits INSIDE the poll loop,
 * so every iteration rewinds to offset 700 and the same records are re-read
 * forever. That is deliberate for this demo; in real code the starting offset
 * would be fetched from external storage (e.g. Redis) before each poll.
 *
 * <p>Runs forever; intended as a runnable demo, not production code.
 */
private static void controlOffset() {
    Properties props = new Properties();
    props.setProperty("bootstrap.servers", "localhost:9092");
    props.setProperty("group.id", "test");
    // Disable auto-commit; offsets are committed manually below.
    // (auto.commit.interval.ms is a no-op with auto-commit disabled, so omitted.)
    props.setProperty("enable.auto.commit", "false");
    props.setProperty("key.deserializer", "org.apache.kafka.common.serialization.StringDeserializer");
    props.setProperty("value.deserializer", "org.apache.kafka.common.serialization.StringDeserializer");

    // Diamond operator instead of the raw KafkaConsumer type.
    KafkaConsumer<String, String> consumer = new KafkaConsumer<>(props);

    // The demo topic has partitions 0 and 1; consume partition 0 only.
    TopicPartition p0 = new TopicPartition(TOPIC_NAME, 0);

    // assign() (not subscribe()) — explicit partition, no group rebalancing.
    consumer.assign(Arrays.asList(p0));

    while (true) {
        /*
         * Manually controlling the start offset:
         *   1. Pick the offset yourself (here: hard-coded 700).
         *   2. If processing fails, the same records are simply consumed again.
         *
         * Typical production pattern:
         *   1. First run starts from 0.
         *   2. After consuming e.g. 100 records, store offset 101 in Redis.
         *   3. Before each poll, read the latest offset back from Redis.
         *   4. Seek to that offset and continue from there.
         */
        consumer.seek(p0, 700);

        ConsumerRecords<String, String> records = consumer.poll(Duration.ofMillis(10000));
        // Handle each partition's records independently.
        for (TopicPartition partition : records.partitions()) {
            List<ConsumerRecord<String, String>> pRecord = records.records(partition);
            for (ConsumerRecord<String, String> record : pRecord) {
                System.err.printf("partition = %d , offset = %d, key = %s, value = %s%n",
                        record.partition(), record.offset(), record.key(), record.value());
            }
            // Committed offset is the NEXT record to consume, hence lastOffset + 1.
            long lastOffset = pRecord.get(pRecord.size() - 1).offset();
            Map<TopicPartition, OffsetAndMetadata> offset = new HashMap<>();
            offset.put(partition, new OffsetAndMetadata(lastOffset + 1));
            // Synchronously commit this single partition's offset.
            consumer.commitSync(offset);
            System.out.println("=============partition - " + partition + "
end================");
        }
    }
}

kafka

视频下载地址:

链接:share.weiyun.com/hPxQDszj 密码:i6qver 

链接:share.weiyun.com/hPxQDszj 密码:7sqheg

链接失效请加微信:cowcow2100