Senior Kafka 笔记:生产者、消费者、自定义分区器与拦截器示例

164 阅读2分钟

Kafka 生产者

/**
 * Basic Kafka producer demo: sends 10 string records to topic "second",
 * routed by the custom partitioner {@code com.ekko.kafka.partition.MyPartitioner}.
 */
public class MyProducer {
    public static void main(String[] args) {
        // Producer configuration.
        Properties props = new Properties();

        // Kafka cluster entry point (bootstrap broker list).
        props.put(ProducerConfig.BOOTSTRAP_SERVERS_CONFIG, "hadoop102:9092");

        // acks: 0, 1 or -1(all). "all" waits for the full ISR to acknowledge.
        props.put(ProducerConfig.ACKS_CONFIG, "all");

        // Number of retries on transient send failures.
        props.put(ProducerConfig.RETRIES_CONFIG, 5);

        // Batch size: 16 KB.
        props.put(ProducerConfig.BATCH_SIZE_CONFIG, 16384);

        // Max time (ms) to wait before sending a not-yet-full batch.
        props.put(ProducerConfig.LINGER_MS_CONFIG, 1);

        // RecordAccumulator buffer size: 32 MB.
        props.put(ProducerConfig.BUFFER_MEMORY_CONFIG, 33554432);

        // Key/value serializers.
        props.put(ProducerConfig.KEY_SERIALIZER_CLASS_CONFIG, "org.apache.kafka.common.serialization.StringSerializer");
        props.put(ProducerConfig.VALUE_SERIALIZER_CLASS_CONFIG, "org.apache.kafka.common.serialization.StringSerializer");

        // Custom partitioner: null-key records -> partition 0, keyed records -> partition 1.
        props.put(ProducerConfig.PARTITIONER_CLASS_CONFIG, "com.ekko.kafka.partition.MyPartitioner");

        // Parameterized on both sides (the original declared a raw KafkaProducer).
        KafkaProducer<String, String> producer = new KafkaProducer<String, String>(props);
        try {
            // Partitioning options, for reference:
            //   no key              -> round-robin:  new ProducerRecord<>("second", "v")
            //   with key            -> hash(key):    new ProducerRecord<>("second", "k", "v")
            //   explicit partition (key may be null):
            //                                        new ProducerRecord<>("second", 0, "k", "v")
            // Here the custom partitioner decides based on key presence.
            for (int i = 0; i < 10; i++) {
                if (i < 5) {
                    // No key -> MyPartitioner routes to partition 0.
                    producer.send(new ProducerRecord<String, String>("second", "ekko==>" + i));
                } else {
                    // With key -> MyPartitioner routes to partition 1.
                    producer.send(new ProducerRecord<String, String>("second", "key", "ekko==>" + i));
                }
            }
        } finally {
            // close() flushes buffered records and releases resources,
            // now guaranteed even if a send throws.
            producer.close();
        }
    }
}

带回调方法的生产者

/**
 * Producer demo with a send callback. Each send is made synchronous by
 * blocking on the returned Future, so records arrive strictly in order.
 */
public class MyCallBackProducer {
    public static void main(String[] args) throws Exception {

        Properties props = new Properties();

        props.put(ProducerConfig.BOOTSTRAP_SERVERS_CONFIG, "hadoop102:9092");
        props.put(ProducerConfig.BATCH_SIZE_CONFIG, 16384);
        props.put(ProducerConfig.ACKS_CONFIG, "all");
        props.put(ProducerConfig.RETRIES_CONFIG, 5);
        props.put(ProducerConfig.LINGER_MS_CONFIG, 1);
        props.put(ProducerConfig.BUFFER_MEMORY_CONFIG, 33554432);
        props.put(ProducerConfig.KEY_SERIALIZER_CLASS_CONFIG, "org.apache.kafka.common.serialization.StringSerializer");
        props.put(ProducerConfig.VALUE_SERIALIZER_CLASS_CONFIG, "org.apache.kafka.common.serialization.StringSerializer");

        KafkaProducer<String, String> producer = new KafkaProducer<String, String>(props);
        try {
            for (int i = 0; i < 10; i++) {
                // The callback fires once the record is acknowledged; on failure
                // it receives a non-null exception.
                Future<RecordMetadata> future = producer.send(
                        new ProducerRecord<String, String>("second", "ekko++>" + i),
                        new Callback() {
                            public void onCompletion(RecordMetadata metadata, Exception exception) {
                                if (exception == null) {
                                    System.out.println(metadata.topic() + "--" + metadata.partition() + "--" + metadata.offset());
                                } else {
                                    // The original silently dropped failures; report them.
                                    System.err.println("send failed: " + exception);
                                }
                            }
                        });
                // Block until the broker acknowledges: synchronous send.
                future.get();
            }
        } finally {
            // Guaranteed flush + cleanup even if a send throws.
            producer.close();
        }
    }
}

自定义分区器

/*
 * Custom partitioner. Only intended for the "second" topic, which has
 * exactly two partitions: records without a key go to partition 0,
 * keyed records go to partition 1.
 */
public class MyPartitioner implements Partitioner {

    public int partition(String topic, Object key, byte[] keyBytes, Object value, byte[] valueBytes, Cluster cluster) {
        return (key == null) ? 0 : 1;
    }

    public void close() {
        // no resources to release
    }

    public void configure(Map<String, ?> configs) {
        // no configuration required
    }
}

Kafka消费者

/**
 * Consumer demo: subscribes to three topics, polls forever, prints each
 * record and commits offsets manually after every non-empty batch.
 */
public class MyConsumer {
    public static void main(String[] args) {

        Properties props = new Properties();

        props.put(ConsumerConfig.BOOTSTRAP_SERVERS_CONFIG, "hadoop102:9092");

        // Offsets are committed manually below, so auto-commit is disabled.
        props.put(ConsumerConfig.ENABLE_AUTO_COMMIT_CONFIG, false);

        // NOTE: only takes effect when auto-commit is enabled; kept so the
        // example can be switched back to auto-commit easily.
        props.put(ConsumerConfig.AUTO_COMMIT_INTERVAL_MS_CONFIG, 1000);

        // auto.offset.reset (default "latest") only applies when:
        //   1. this consumer group has never committed an offset for the topic, or
        //   2. its committed offset has been deleted from the cluster.
        // props.put(ConsumerConfig.AUTO_OFFSET_RESET_CONFIG, "earliest");

        // Consumer group.
        props.put(ConsumerConfig.GROUP_ID_CONFIG, "group1");

        // Key/value deserializers.
        props.put(ConsumerConfig.KEY_DESERIALIZER_CLASS_CONFIG, "org.apache.kafka.common.serialization.StringDeserializer");
        props.put(ConsumerConfig.VALUE_DESERIALIZER_CLASS_CONFIG, "org.apache.kafka.common.serialization.StringDeserializer");

        KafkaConsumer<String, String> consumer = new KafkaConsumer<String, String>(props);

        // Subscribe to the topics to consume.
        consumer.subscribe(Arrays.asList("first", "second", "third"));

        // Poll loop (runs until the process is killed).
        while (true) {

            ConsumerRecords<String, String> records = consumer.poll(1000);

            for (ConsumerRecord<String, String> record : records) {
                System.out.println(record.topic() + "--" + record.partition() + "--" + record.offset() + "--" + record.key() + "--" + record.value());
            }

            // Manual commit — only when this poll actually returned records;
            // the original committed unconditionally, issuing redundant
            // commit requests on every empty poll.
            if (!records.isEmpty()) {
                // Synchronous commit: blocks until the broker confirms.
                consumer.commitSync();

                // Asynchronous alternative:
                // consumer.commitAsync();
            }
        }
    }
}

Kafka拦截器

示例代码是2个拦截器组成的拦截器链

/*
* 在所有消息内容前加时间戳
* */
/*
 * Producer interceptor that prefixes every record value with the current
 * timestamp in milliseconds, separated by "--".
 */
public class TimeInterceptor implements ProducerInterceptor<String,String> {

    public ProducerRecord<String, String> onSend(ProducerRecord<String, String> record) {
        // Rebuild the record with the same routing fields and a stamped value.
        String stamped = System.currentTimeMillis() + "--" + record.value();
        return new ProducerRecord<String, String>(record.topic(), record.partition(), record.key(), stamped);
    }

    public void onAcknowledgement(RecordMetadata metadata, Exception exception) {
        // nothing to do on acknowledgement
    }

    public void close() {
        // no resources to release
    }

    public void configure(Map<String, ?> configs) {
        // no configuration required
    }
}
/*
* 计算消息发送成功和失败的个数
* */

/*
 * Producer interceptor that counts successful and failed sends and prints
 * the totals when the producer is closed.
 */
public class CountInterceptor implements ProducerInterceptor<String,String> {

    // onAcknowledgement is invoked from the producer's background I/O thread
    // while close() runs on the caller's thread, so the original boxed
    // Integer counters with non-atomic `++` could lose updates or publish a
    // stale total. Atomic counters fix both (fully qualified — this example
    // file has no import section to extend).
    private final java.util.concurrent.atomic.AtomicInteger success = new java.util.concurrent.atomic.AtomicInteger();
    private final java.util.concurrent.atomic.AtomicInteger fail = new java.util.concurrent.atomic.AtomicInteger();

    public ProducerRecord<String, String> onSend(ProducerRecord<String, String> record) {
        // Pass the record through unchanged; this interceptor only counts acks.
        return record;
    }

    public void onAcknowledgement(RecordMetadata metadata, Exception exception) {
        // A null exception means the send succeeded.
        if (exception == null) {
            success.incrementAndGet();
        } else {
            fail.incrementAndGet();
        }
    }

    public void close() {
        // AtomicInteger.toString() prints the plain number, so output format
        // is identical to the original.
        System.out.println("success=" + success);
        System.out.println("fail=" + fail);
    }

    public void configure(Map<String, ?> configs) {
        // no configuration required
    }
}
/**
 * Producer demo wired with a two-element interceptor chain:
 * TimeInterceptor runs first (prefixes a timestamp onto each value),
 * then CountInterceptor (tallies acks). Chain order = list order.
 */
public class InterceptorProducer {

    public static void main(String[] args) {

        // Producer configuration (same tuning as MyProducer).
        Properties props = new Properties();

        props.put(ProducerConfig.BOOTSTRAP_SERVERS_CONFIG, "hadoop102:9092");
        props.put(ProducerConfig.ACKS_CONFIG, "all");
        props.put(ProducerConfig.RETRIES_CONFIG, 5);
        props.put(ProducerConfig.BATCH_SIZE_CONFIG, 16384);
        props.put(ProducerConfig.LINGER_MS_CONFIG, 1);
        props.put(ProducerConfig.BUFFER_MEMORY_CONFIG, 33554432);
        props.put(ProducerConfig.KEY_SERIALIZER_CLASS_CONFIG, "org.apache.kafka.common.serialization.StringSerializer");
        props.put(ProducerConfig.VALUE_SERIALIZER_CLASS_CONFIG, "org.apache.kafka.common.serialization.StringSerializer");

        // Register the interceptor chain; interceptors execute in list order.
        List<String> interceptors = new ArrayList<String>();
        interceptors.add("com.ekko.kafka.interceptor.TimeInterceptor");
        interceptors.add("com.ekko.kafka.interceptor.CountInterceptor");
        props.put(ProducerConfig.INTERCEPTOR_CLASSES_CONFIG, interceptors);

        // Parameterized on both sides (the original declared a raw KafkaProducer
        // and sent raw ProducerRecords).
        KafkaProducer<String, String> producer = new KafkaProducer<String, String>(props);
        try {
            for (int i = 0; i < 10; i++) {
                producer.send(new ProducerRecord<String, String>("second", "ekko==>" + i));
            }
        } finally {
            // close() flushes pending records and invokes each interceptor's
            // close() — which is where CountInterceptor prints its totals —
            // now guaranteed even if a send throws.
            producer.close();
        }
    }
}