运行环境
Linux CentOS
JDK8
Zookeeper
Kafka
JDK
Kafka从2.0.0版本就不再支持JDK7及以下版本
Zookeeper
分布式协调系统,Zookeeper是安装Kafka集群的必要组件,Kafka通过Zookeeper来实施对元数据信息的管理,包括集群、broker、主题、分区等内容。
服务器演示
启动zookeeper和kafka
## 启动zookeeper
./bin/zookeeper-server-start.sh config/zookeeper.properties &
## 启动kafka
./bin/kafka-server-start.sh config/server.properties &
## 查看kafka是否启动
jps
创建Topic(topic-demo)
bin/kafka-topics.sh --zookeeper localhost:2181 --create --topic topic-demo --replication-factor 1 --partitions 1
创建Consumer
bin/kafka-console-consumer.sh --bootstrap-server localhost:9092 --topic topic-demo
创建Producer
bin/kafka-console-producer.sh --broker-list localhost:9092 --topic topic-demo
代码演示
生产者类Producer
import org.apache.kafka.clients.producer.KafkaProducer;
import org.apache.kafka.clients.producer.ProducerConfig;
import org.apache.kafka.clients.producer.ProducerRecord;
import org.apache.kafka.common.serialization.StringSerializer;
import java.util.Properties;
import java.util.Random;
/**
 * Minimal Kafka producer demo: sends 10 random "Hello,N" messages to
 * {@link #topic} at 500 ms intervals, then closes the producer.
 */
public class Producer {
    public static final String brokerList = "ip:9092";
    public static final String topic = "topic-demo";

    /**
     * Builds the producer configuration: bootstrap servers plus String
     * serializers for both key and value.
     *
     * @return properties ready to construct a {@code KafkaProducer<String, String>}
     */
    public static Properties initConfig() {
        Properties properties = new Properties();
        properties.put(ProducerConfig.KEY_SERIALIZER_CLASS_CONFIG, StringSerializer.class);
        properties.put(ProducerConfig.VALUE_SERIALIZER_CLASS_CONFIG, StringSerializer.class);
        properties.put(ProducerConfig.BOOTSTRAP_SERVERS_CONFIG, brokerList);
        return properties;
    }

    public static void main(String[] args) {
        Properties properties = initConfig();
        KafkaProducer<String, String> producer = new KafkaProducer<>(properties);
        // Create the RNG once instead of on every loop iteration.
        Random random = new Random();
        try {
            for (int i = 0; i < 10; i++) {
                String msg = "Hello," + random.nextInt(100);
                // Message record: topic + value (no key, so partition is chosen by the partitioner).
                ProducerRecord<String, String> record = new ProducerRecord<>(topic, msg);
                // Fire-and-forget send; delivery errors would surface via the returned Future.
                producer.send(record);
                System.out.println("消息发送成功:" + msg);
                Thread.sleep(500);
            }
        } catch (InterruptedException e) {
            // Restore the interrupt flag instead of swallowing it.
            Thread.currentThread().interrupt();
        } catch (Exception e) {
            e.printStackTrace();
        } finally {
            // close() flushes any buffered records before releasing resources.
            producer.close();
        }
    }
}
消费者类Consumer
import lombok.extern.slf4j.Slf4j;
import org.apache.kafka.clients.consumer.ConsumerConfig;
import org.apache.kafka.clients.consumer.ConsumerRecord;
import org.apache.kafka.clients.consumer.ConsumerRecords;
import org.apache.kafka.clients.consumer.KafkaConsumer;
import org.apache.kafka.common.serialization.StringDeserializer;
import java.time.Duration;
import java.util.Collections;
import java.util.Properties;
import java.util.concurrent.atomic.AtomicBoolean;
@Slf4j
public class Consumer {
public static final String brokerList = "ip:9092";
public static final String topic = "topic-demo";
public static final String groupId = "group.demo";
public static final AtomicBoolean isRunning = new AtomicBoolean(true);
public static Properties initConfig() {
Properties properties = new Properties();
properties.put(ConsumerConfig.BOOTSTRAP_SERVERS_CONFIG, brokerList);
properties.put(ConsumerConfig.KEY_DESERIALIZER_CLASS_CONFIG, StringDeserializer.class);
properties.put(ConsumerConfig.VALUE_DESERIALIZER_CLASS_CONFIG, StringDeserializer.class);
properties.put(ConsumerConfig.GROUP_ID_CONFIG, groupId);
return properties;
}
public static void main(String[] args) {
Properties properties = initConfig();
KafkaConsumer<String, String> consumer = new KafkaConsumer<String, String>(properties);
// 订阅消息
consumer.subscribe(Collections.singletonList(topic));
try {
while (isRunning.get()) {
ConsumerRecords<String, String> records = consumer.poll(Duration.ofMillis(100));
for (ConsumerRecord<String, String> record : records) {
System.out.println(String.format("topic:%s,offset:%d,消息:%s",
record.topic(), record.offset(), record.value()));
}
}
} catch (Exception e) {
log.error(e.getMessage());
} finally {
consumer.close();
}
}
}