Installing Kafka on Linux and Basic Spring Boot Integration


1. If you don't have a server, install a virtual machine on your Windows PC first; give the VM at least 2 GB of RAM.


2. Download Kafka -- netdisk link: https://pan.baidu.com/s/1_eSWoubkpMTjSXCg1CR-ug?pwd=dqna

1. Extract the archive: tar zxvf kafka_2.13-2.7.0.tgz

2. Kafka needs a JDK. The VM image used here already ships with one; if yours doesn't, search for how to install it.

Check the Java version: java -version


3. Preparation

1. If you don't have permission on the directory: chmod -R 777 kafka_2.13-2.7.0 (note the uppercase -R for recursive)

2. Check the firewall: systemctl status firewalld. ZooKeeper and Kafka need ports 2181 and 9092 open; since this is a local VM, I simply stop the firewall with systemctl stop firewalld (if you'd rather keep it on, see the sketch below).
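A firewalld sketch that opens just the two required ports instead of disabling the service:

```
firewall-cmd --permanent --add-port=2181/tcp
firewall-cmd --permanent --add-port=9092/tcp
firewall-cmd --reload
```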


3. Enter the extracted directory: cd kafka_2.13-2.7.0

1. In this directory create two log directories, zook-logs and kafka-logs: mkdir zook-logs kafka-logs

2. Modify the configuration. Enter the config folder (cd config) and edit Kafka's configuration file server.properties: set listeners=PLAINTEXT://192.168.245.129:9092 using your VM's IP (run ifconfig if you don't know it).

Change the log storage path (log.dirs) to the kafka-logs directory created above, which sits at the same level; all messages are stored in that folder.


3. Kafka runs on top of ZooKeeper, so set zookeeper.connect to the ZooKeeper address. The combined edits are sketched below.

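Taken together, the edited entries in config/server.properties look roughly like this (a sketch; the log.dirs path assumes the archive was extracted under /root, so adjust the IP and paths to your own machine):

```
listeners=PLAINTEXT://192.168.245.129:9092
log.dirs=/root/kafka_2.13-2.7.0/kafka-logs
zookeeper.connect=localhost:2181
```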

4. Edit the zookeeper.properties file.

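A minimal sketch of config/zookeeper.properties, assuming the same extraction path as above and pointing the data directory at the zook-logs folder created earlier:

```
dataDir=/root/kafka_2.13-2.7.0/zook-logs
clientPort=2181
```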

4. Start the Kafka service

1. Kafka depends on ZooKeeper, so ZooKeeper must be started first. Change into the extracted directory.


2. Start ZooKeeper

bin/zookeeper-server-start.sh config/zookeeper.properties


3. Start Kafka

bin/kafka-server-start.sh config/server.properties

If there are problems, check the configuration files. Kafka is now up and running.
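Both start scripts occupy the terminal in the foreground; they also accept a -daemon flag if you prefer to run them in the background:

```
bin/zookeeper-server-start.sh -daemon config/zookeeper.properties
bin/kafka-server-start.sh -daemon config/server.properties
```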

5. Spring Boot integration with Kafka

1. Add the dependency

```
<dependency>
    <groupId>org.springframework.kafka</groupId>
    <artifactId>spring-kafka</artifactId>
    <version>2.5.10.RELEASE</version>
</dependency>
```

2. Consumer configuration

```
import java.util.HashMap;
import java.util.Map;

import org.apache.kafka.clients.consumer.ConsumerConfig;
import org.springframework.context.annotation.Bean;
import org.springframework.context.annotation.Configuration;
import org.springframework.kafka.config.ConcurrentKafkaListenerContainerFactory;
import org.springframework.kafka.core.ConsumerFactory;
import org.springframework.kafka.core.DefaultKafkaConsumerFactory;

/**
 * @Author Huang  Consumer configuration
 * @Date 2022-03-30 10:51 AM
 * @Version 4.3
 */
@Configuration
public class ConsumerConfigure {
    // Kafka listener container factory
    @Bean
    public ConcurrentKafkaListenerContainerFactory<String, Object> kafkaListenerContainerFactory() {
        ConcurrentKafkaListenerContainerFactory<String, Object> factory = new ConcurrentKafkaListenerContainerFactory<>();
        factory.setConsumerFactory(consumerFactory());
        return factory;
    }

    private ConsumerFactory<String, Object> consumerFactory() {
        return new DefaultKafkaConsumerFactory<>(consumerConfigs());
    }

    private Map<String, Object> consumerConfigs() {
        HashMap<String, Object> props = new HashMap<>();
        props.put(ConsumerConfig.BOOTSTRAP_SERVERS_CONFIG, "192.168.245.129:9092");
        props.put(ConsumerConfig.GROUP_ID_CONFIG, "test"); // consumer group ID
        props.put(ConsumerConfig.ENABLE_AUTO_COMMIT_CONFIG, "true"); // true: Kafka commits offsets automatically; false: we commit them ourselves
        props.put(ConsumerConfig.AUTO_COMMIT_INTERVAL_MS_CONFIG, "1000"); // auto.commit.interval.ms; ignored when enable.auto.commit is false
        props.put("key.deserializer", "org.apache.kafka.common.serialization.StringDeserializer"); // key deserializer
        props.put("value.deserializer", "com.ruoyi.web.controller.system.kafka.DecodingKafka"); // value deserializer: the custom class shown below
        return props;
    }

    @Bean
    public SimpleConsumerListener simpleConsumerListener() {
        return new SimpleConsumerListener();
    }
}
```

3. Producer configuration

```
import java.util.HashMap;
import java.util.Map;

import org.apache.kafka.clients.producer.ProducerConfig;
import org.springframework.context.annotation.Bean;
import org.springframework.context.annotation.Configuration;
import org.springframework.kafka.core.DefaultKafkaProducerFactory;
import org.springframework.kafka.core.KafkaTemplate;
import org.springframework.kafka.core.ProducerFactory;

/**
 * @Author Huang   Producer configuration
 * @Date 2022-03-30 10:50 AM
 * @Version 4.3
 */
@Configuration
public class ProducerConfigure {

    public ProducerFactory<String, Object> producerFactory() {
        return new DefaultKafkaProducerFactory<>(producerConfigs());
    }

    private Map<String, Object> producerConfigs() { // producer configuration properties
        Map<String, Object> props = new HashMap<>();
        props.put(ProducerConfig.BOOTSTRAP_SERVERS_CONFIG, "192.168.245.129:9092"); // bootstrap.servers
        props.put(ProducerConfig.ACKS_CONFIG, "all"); // acks=all: a send succeeds only once every in-sync replica (ISR) has received the message
        props.put(ProducerConfig.RETRIES_CONFIG, 0); // number of automatic retries on retriable errors
        props.put(ProducerConfig.BATCH_SIZE_CONFIG, 16384); // batch.size: a batch is sent once it reaches this many bytes; bigger batches raise throughput but use more memory
        props.put(ProducerConfig.LINGER_MS_CONFIG, 1); // linger.ms: send after this many ms even if the batch is not full; whichever of batch.size/linger.ms is hit first triggers the send (throughput vs. latency trade-off)
        props.put(ProducerConfig.BUFFER_MEMORY_CONFIG, 33554432); // buffer.memory: total bytes available for buffering unsent records
        props.put("key.serializer", "org.apache.kafka.common.serialization.StringSerializer"); // key serializer
        props.put("value.serializer", "com.ruoyi.web.controller.system.kafka.EncodeKafka"); // value serializer: the custom class shown below
 
        return props;
    }

    // Expose the KafkaTemplate as a bean
    @Bean
    public KafkaTemplate<String, Object> kafkaTemplate() {
        return new KafkaTemplate<>(producerFactory());
    }
}
```

The producer and consumer settings could also live in the Spring Boot configuration file; a sketch follows.
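For reference, a rough equivalent in application.properties might look like this (a sketch using Spring Boot's spring.kafka.* keys; with this approach Boot's auto-configured factories replace the @Configuration classes above):

```
spring.kafka.bootstrap-servers=192.168.245.129:9092
spring.kafka.consumer.group-id=test
spring.kafka.consumer.enable-auto-commit=true
spring.kafka.consumer.auto-commit-interval=1000
spring.kafka.consumer.key-deserializer=org.apache.kafka.common.serialization.StringDeserializer
spring.kafka.consumer.value-deserializer=com.ruoyi.web.controller.system.kafka.DecodingKafka
spring.kafka.producer.acks=all
spring.kafka.producer.retries=0
spring.kafka.producer.batch-size=16384
spring.kafka.producer.buffer-memory=33554432
spring.kafka.producer.key-serializer=org.apache.kafka.common.serialization.StringSerializer
spring.kafka.producer.value-serializer=com.ruoyi.web.controller.system.kafka.EncodeKafka
```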

4. The serializer and deserializer classes (adjust the package path to your project). Kafka's defaults handle strings; by overriding them we can send and receive custom types.


```
import java.util.Map;

import org.apache.kafka.common.serialization.Serializer;

/**
 * @Author Huang  Custom serializer implementing Serializer
 * @Date 2022-03-30 03:40 PM
 * @Version 4.3
 */
public class EncodeKafka implements Serializer<Object> {

    public EncodeKafka() {
    }

    @Override
    public void configure(Map<String, ?> configs, boolean isKey) {
        // nothing to configure
    }

    @Override
    public byte[] serialize(String topic, Object data) {
        // this is the only method that needs real work
        return BeanUtils.beanToByte(data);
    }

    @Override
    public void close() {
        // nothing to close
    }
}
```

```
import java.util.Map;

import org.apache.kafka.common.serialization.Deserializer;

/**
 * @Author Huang  Custom deserializer implementing Deserializer
 * @Date 2022-03-30 03:58 PM
 * @Version 4.3
 */
public class DecodingKafka implements Deserializer<Object> {

    @Override
    public void configure(Map<String, ?> configs, boolean isKey) {
    }

    @Override
    public Object deserialize(String topic, byte[] data) {
        return BeanUtils.byte2Obj(data);
    }

    @Override
    public void close() {

    }
}
```

The helper methods used above (a simple BeanUtils based on JDK serialization):

```
import java.io.ByteArrayInputStream;
import java.io.ByteArrayOutputStream;
import java.io.IOException;
import java.io.ObjectInputStream;
import java.io.ObjectOutputStream;

public class BeanUtils {

    /**
     * Serialize an object to a byte array
     */
    public static byte[] beanToByte(Object obj) {
        byte[] bb = null;
        try (ByteArrayOutputStream byteArray = new ByteArrayOutputStream();
             ObjectOutputStream outputStream = new ObjectOutputStream(byteArray)) {
            outputStream.writeObject(obj);
            outputStream.flush();
            bb = byteArray.toByteArray();
        } catch (IOException e) {
            e.printStackTrace();
        }
        return bb;
    }

    /**
     * Deserialize a byte array back into an object
     */
    public static Object byte2Obj(byte[] bytes) {
        Object readObject = null;
        try (ByteArrayInputStream in = new ByteArrayInputStream(bytes);
             ObjectInputStream inputStream = new ObjectInputStream(in)) {
            readObject = inputStream.readObject();
        } catch (Exception e) {
            e.printStackTrace();
        }
        return readObject;
    }
}
```
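Since these helpers use JDK serialization, any custom payload class must implement java.io.Serializable. A minimal sketch (the class name and fields are placeholders):

```
import java.io.Serializable;

// Hypothetical payload type; JDK serialization requires Serializable.
public class UserMessage implements Serializable {
    private static final long serialVersionUID = 1L;

    private Long id;
    private String content;

    public Long getId() { return id; }
    public void setId(Long id) { this.id = id; }
    public String getContent() { return content; }
    public void setContent(String content) { this.content = content; }
}
```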

5. Create topics

```
import java.util.HashMap;
import java.util.Map;

import org.apache.kafka.clients.admin.AdminClientConfig;
import org.apache.kafka.clients.admin.NewTopic;
import org.springframework.context.annotation.Bean;
import org.springframework.context.annotation.Configuration;
import org.springframework.kafka.core.KafkaAdmin;

/**
 * @Author Huang  Topic configuration
 * @Date 2022-03-30 10:48 AM
 * @Version 4.3
 */
@Configuration
public class TopicConfigure {

    @Bean
    public KafkaAdmin admin() {
        Map<String, Object> configs = new HashMap<>();
        configs.put(AdminClientConfig.BOOTSTRAP_SERVERS_CONFIG, "192.168.245.129:9092");
        return new KafkaAdmin(configs);
    }

    // Create a topic: name, partition count, replication factor
    @Bean
    public NewTopic helloWorld1() {
        return new NewTopic("HelloWorld1", 10, (short) 1); // 10 partitions, replication factor 1
    }

    @Bean
    public NewTopic helloWorld2() {
        return new NewTopic("HelloWorld2", 10, (short) 1);
    }

    @Bean
    public NewTopic helloWorld3() {
        return new NewTopic("HelloWorld3", 10, (short) 1);
    }

    @Bean
    public NewTopic helloWorld4() {
        return new NewTopic("HelloWorld4", 5, (short) 1); //分区
    }
}
```

Start the Java application, then check that the topics were created: bin/kafka-topics.sh --list --zookeeper 192.168.245.129:2181

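The --zookeeper flag still works in Kafka 2.7 but is deprecated; the same listing can go through the broker directly:

```
bin/kafka-topics.sh --list --bootstrap-server 192.168.245.129:9092
```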

6. Create consumers

```
import lombok.extern.log4j.Log4j2;

import org.springframework.kafka.annotation.KafkaListener;

/**
 * @Author Huang  Consumer listeners
 * @Date 2022-03-30 10:52 AM
 * @Version 4.3
 */
@Log4j2
public class SimpleConsumerListener {
    // id sets the consumer group ID; containerFactory defaults to "kafkaListenerContainerFactory" when omitted
    @KafkaListener(id = "test", topics = {"HelloWorld", "HelloWorld3"})
    public void onMessage1(String message) {
        log.info(">kafka-topic0-3接收结果:{}", message);
    }

    @KafkaListener(topics = "HelloWorld1")
    public void onMessage2(Object message) {
        log.info(">kafka-topic1接收结果:{}", message);
    }

    @KafkaListener(topics = "HelloWorld2")
    public void onMessage3(String message) {
        log.info(">kafka-topic2接收结果:{}", message);
    }
}
```

7. Create a producer


```
import javax.annotation.Resource;

import lombok.extern.log4j.Log4j2;

import org.springframework.kafka.core.KafkaTemplate;
import org.springframework.web.bind.annotation.GetMapping;
import org.springframework.web.bind.annotation.PathVariable;
import org.springframework.web.bind.annotation.RestController;

/**
 * @Author Huang  Producer endpoint
 * @Date 2022-03-30 11:34 AM
 * @Version 4.3
 */
@RestController
@Log4j2
public class KafkaController {

    @Resource
    private KafkaTemplate<String, Object> kafkaTemplate;
    final static int HUNDRED = 1; // number of messages to send per request (raise it to stress-test)

    @GetMapping("/send/{topic}/{message}")
    public String sendStr(@PathVariable("topic") String topic, @PathVariable("message") String message) {
        for (int i = 0; i < HUNDRED; i++) {
            kafkaTemplate.send(topic, "message #" + i + ": " + message);
        }
        return message;
    }
}
```
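If you want confirmation that a record actually reached the broker, KafkaTemplate.send returns a ListenableFuture you can attach callbacks to. A sketch of a hypothetical extra endpoint, reusing the kafkaTemplate and log fields of the controller above:

```
    // Hypothetical endpoint: send one message and log the delivery result (spring-kafka 2.5.x API).
    @GetMapping("/sendWithCallback/{topic}/{message}")
    public String sendWithCallback(@PathVariable("topic") String topic, @PathVariable("message") String message) {
        kafkaTemplate.send(topic, message).addCallback(
                result -> log.info("sent to partition {} at offset {}",
                        result.getRecordMetadata().partition(),
                        result.getRecordMetadata().offset()),
                ex -> log.error("send failed", ex));
        return message;
    }
```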

8. Test the producer. I use Postman here: http://localhost:8088/send/HelloWorld/我发送的消息 (the final path segment is the message to send).

The consumer logs the received messages, and the simple produce-and-consume round trip is complete.
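The same request from the command line, with a placeholder message and assuming the app listens on port 8088 as above:

```
curl http://localhost:8088/send/HelloWorld/hello
```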