Integrating Kafka with Spring Boot



Setting up a single-node Kafka

1. Install ZooKeeper

Download the ZooKeeper archive from mirrors.tuna.tsinghua.edu.cn/apache/zook… . The installation path used here is /usr/local/software.

[root@hanjiaxv bin]# cd /usr/local/software/
[root@hanjiaxv software]# ls
kafka_2.12-2.3.1.tgz zookeeper-3.4.14.tar.gz
[root@hanjiaxv software]# tar -zxvf zookeeper-3.4.14.tar.gz 
[root@hanjiaxv software]# mv zookeeper-3.4.14 zookeeper
[root@hanjiaxv software]# cd zookeeper/
[root@hanjiaxv zookeeper]# ./bin/zkServer.sh start

2. Install Kafka

Download the Kafka archive from www.apache.org/dyn/closer.… . The installation path is again /usr/local/software.

[root@hanjiaxv software]# tar -zxvf kafka_2.12-2.3.1.tgz 
[root@hanjiaxv software]# mv kafka_2.12-2.3.1 kafka 
[root@hanjiaxv software]# cd kafka 
# edit the broker configuration
[root@hanjiaxv kafka]# vim config/server.properties 
# see kafka.server.KafkaConfig for additional details and defaults

############################# Server Basics #############################

# The id of the broker. This must be set to a unique integer for each broker.
broker.id=1

############################# Socket Server Settings #############################
host.name=192.168.189.144
# The address the socket server listens on. It will get the value returned from 
# java.net.InetAddress.getCanonicalHostName() if not configured.
#   FORMAT:
#     listeners = listener_name://host_name:port
#   EXAMPLE:
#     listeners = PLAINTEXT://your.host.name:9092
#listeners=PLAINTEXT://:9092
listeners = PLAINTEXT://192.168.189.144:9092

# Hostname and port the broker will advertise to producers and consumers. If not set, 
# it uses the value for "listeners" if configured.  Otherwise, it will use the value
# returned from java.net.InetAddress.getCanonicalHostName().
advertised.listeners=PLAINTEXT://192.168.189.144:9092

# start Kafka
[root@hanjiaxv kafka]# ./bin/kafka-server-start.sh -daemon config/server.properties
[root@hanjiaxv kafka]# jps
17953 QuorumPeerMain
19605 ConsoleConsumer
18280 Kafka
18349 ConsoleProducer
12111 ProdServerStart
22143 Jps

Integrating Kafka with Spring Boot

1. Add the dependencies

    <dependencies>
        <dependency>
            <groupId>org.springframework.cloud</groupId>
            <artifactId>spring-cloud-starter-netflix-eureka-client</artifactId>
        </dependency>
        <dependency>
            <groupId>org.springframework.boot</groupId>
            <artifactId>spring-boot-starter-web</artifactId>
        </dependency>
        <dependency>
            <groupId>org.springframework.kafka</groupId>
            <artifactId>spring-kafka</artifactId>
        </dependency>
        <dependency>
            <groupId>org.apache.kafka</groupId>
            <artifactId>kafka-clients</artifactId>
        </dependency>
        <dependency>
            <groupId>com.alibaba</groupId>
            <artifactId>fastjson</artifactId>
        </dependency>
        <dependency>
            <groupId>org.projectlombok</groupId>
            <artifactId>lombok</artifactId>
        </dependency>
    </dependencies>

2. Write the configuration file

server:
  port: 8082
spring:
  application:
    name: pg-kafka-producer
  kafka:
    # Kafka broker address
    bootstrap-servers: 192.168.189.144:9092
    # producer settings
    producer:
      retries: 0
      # maximum size of a message batch, in bytes
      batch-size: 16384
      # total memory available for buffering records, in bytes
      buffer-memory: 33554432
      key-serializer: org.apache.kafka.common.serialization.StringSerializer
      value-serializer: org.apache.kafka.common.serialization.StringSerializer
    consumer:
      group-id: test-hello-group
      auto-offset-reset: earliest
      enable-auto-commit: true
      auto-commit-interval: 100
      key-deserializer: org.apache.kafka.common.serialization.StringDeserializer
      value-deserializer: org.apache.kafka.common.serialization.StringDeserializer
eureka:
  client:
    service-url:
      defaultZone: http://localhost:8761/eureka/
  instance:
    prefer-ip-address: true

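All of the spring.kafka.* properties above are handled by Spring Boot's Kafka auto-configuration, which builds the ProducerFactory, ConsumerFactory and KafkaTemplate beans for you. If you want to see roughly what that amounts to on the producer side (or need to override something the properties don't expose), an explicit configuration looks like the sketch below. It only mirrors the YAML values and is illustrative; the class name KafkaProducerConfig is made up, and with Spring Boot you normally do not need it at all.

package com.isch.pgkafkaproducer.config;

import java.util.HashMap;
import java.util.Map;

import org.apache.kafka.clients.producer.ProducerConfig;
import org.apache.kafka.common.serialization.StringSerializer;
import org.springframework.context.annotation.Bean;
import org.springframework.context.annotation.Configuration;
import org.springframework.kafka.core.DefaultKafkaProducerFactory;
import org.springframework.kafka.core.KafkaTemplate;
import org.springframework.kafka.core.ProducerFactory;

/**
 * Illustrative only: roughly what the spring.kafka.producer.* properties above
 * translate to. With Spring Boot's auto-configuration this class is not needed.
 */
@Configuration
public class KafkaProducerConfig {

    @Bean
    public ProducerFactory<String, String> producerFactory() {
        Map<String, Object> props = new HashMap<>();
        props.put(ProducerConfig.BOOTSTRAP_SERVERS_CONFIG, "192.168.189.144:9092");
        props.put(ProducerConfig.RETRIES_CONFIG, 0);
        props.put(ProducerConfig.BATCH_SIZE_CONFIG, 16384);        // batch size in bytes
        props.put(ProducerConfig.BUFFER_MEMORY_CONFIG, 33554432);  // total buffer memory in bytes
        props.put(ProducerConfig.KEY_SERIALIZER_CLASS_CONFIG, StringSerializer.class);
        props.put(ProducerConfig.VALUE_SERIALIZER_CLASS_CONFIG, StringSerializer.class);
        return new DefaultKafkaProducerFactory<>(props);
    }

    @Bean
    public KafkaTemplate<String, String> kafkaTemplate() {
        return new KafkaTemplate<>(producerFactory());
    }
}
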
3. Write the producer and consumer

package com.isch.pgkafkaproducer.pojo;

import lombok.Data;


/**
 * @ClassName: Message
 * @Description Entity class
 * @Author: Hanjiaxv
 * @Date: 2019/11/18 14:29
 * @Version: V1.0
 **/
@Data
public class Message {
    // task id
    private Long taskId;
    // send time
    private String sendTime;
    // message content
    private String msg;
}
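
A Message travels through Kafka as a plain JSON string: the value serializer configured above is StringSerializer, so fastjson does the object mapping on both ends. Below is a small standalone sketch of that round trip, with no Kafka involved; the class name JsonRoundTrip is made up for illustration.

package com.isch.pgkafkaproducer.pojo;

import java.util.Date;

import com.alibaba.fastjson.JSON;

public class JsonRoundTrip {
    public static void main(String[] args) {
        Message message = new Message();
        message.setTaskId(System.currentTimeMillis());
        message.setSendTime(new Date().toString());
        message.setMsg("hello kafka");

        // serialize the way ProducerService does before sending
        String json = JSON.toJSONString(message);
        System.out.println(json);   // e.g. {"msg":"hello kafka","sendTime":"...","taskId":...}

        // deserialize the way the consumer side turns the payload back into a Message
        Message copy = JSON.parseObject(json, Message.class);
        System.out.println(copy.getMsg());
    }
}
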
package com.isch.pgkafkaproducer.Service;

import com.alibaba.fastjson.JSON;
import com.isch.pgkafkaproducer.pojo.Message;
import org.springframework.beans.factory.annotation.Autowired;
import org.springframework.kafka.core.KafkaTemplate;
import org.springframework.stereotype.Service;

import java.util.Date;
import java.util.UUID;


/**
 * @ClassName: ProducerService
 * @Description Producer
 * @Author: Hanjiaxv
 * @Date: 2019/11/18 14:01
 * @Version: V1.0
 **/
@Service
public class ProducerService {

    @Autowired
    private KafkaTemplate<String, String> kafkaTemplate;

    public void send() {
        for (int i = 0; i < 5; i++) {
            Message message = new Message();
            message.setTaskId(System.currentTimeMillis());
            message.setMsg(UUID.randomUUID().toString() + "...." + i);
            message.setSendTime(new Date().toString());
            // serialize to JSON and send to the "hello" topic
            kafkaTemplate.send("hello", JSON.toJSONString(message));
        }
    }
}
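
Note that kafkaTemplate.send() is asynchronous: the loop returns before the broker has acknowledged anything, and with retries: 0 a failed send is silently dropped. If you want confirmation, you can attach a callback to the future that send() returns. The sketch below assumes the Spring Kafka 2.x API, where send() returns a ListenableFuture; the method name sendWithCallback is made up and would simply be an extra method on ProducerService.

// Extra imports needed in ProducerService for this sketch:
// import org.springframework.kafka.support.SendResult;
// import org.springframework.util.concurrent.ListenableFutureCallback;

public void sendWithCallback(String payload) {
    kafkaTemplate.send("hello", payload)
            .addCallback(new ListenableFutureCallback<SendResult<String, String>>() {
                @Override
                public void onSuccess(SendResult<String, String> result) {
                    // partition and offset assigned by the broker
                    System.out.println("sent to partition " + result.getRecordMetadata().partition()
                            + ", offset " + result.getRecordMetadata().offset());
                }

                @Override
                public void onFailure(Throwable ex) {
                    System.out.println("send failed: " + ex.getMessage());
                }
            });
}
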
package com.isch.pgkafkaproducer.Service;
import com.alibaba.fastjson.JSONObject;

import com.isch.pgkafkaproducer.pojo.Message;
import org.apache.kafka.clients.consumer.ConsumerRecord;
import org.springframework.kafka.annotation.KafkaListener;
import org.springframework.stereotype.Service;

import java.util.Optional;

/**
 * @ClassName: ConsumerService
 * @Description Consumer
 * @Author: Hanjiaxv
 * @Date: 2019/11/18 14:37
 * @Version: V1.0
 **/
@Service
public class ConsumerService {

    @KafkaListener(topics = {"hello"})
    public void listen(ConsumerRecord<String, String> record) {
        Optional<?> kafkaMessage = Optional.ofNullable(record.value());
        if (kafkaMessage.isPresent()) {
            Object message = kafkaMessage.get();
            System.out.println("-----------------------------record=" + record);
            System.out.println("-----------------------------message=" + message);
            // deserialize the JSON payload back into a Message object
            Message message1 = JSONObject.toJavaObject(JSONObject.parseObject((String) message), Message.class);
            System.out.println(message1.getSendTime());
        }
    }
}
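
Accepting the whole ConsumerRecord is not required; @KafkaListener can also inject just the String payload, which shortens the JSON handling. Here is a minimal alternative sketch on the same hello topic; the class name and the groupId test-hello-group-2 are made up so that it would not compete with ConsumerService for records.

package com.isch.pgkafkaproducer.Service;

import com.alibaba.fastjson.JSON;
import com.isch.pgkafkaproducer.pojo.Message;
import org.springframework.kafka.annotation.KafkaListener;
import org.springframework.stereotype.Service;

@Service
public class PayloadConsumerService {

    // a separate consumer group so both listeners each receive every message
    @KafkaListener(topics = "hello", groupId = "test-hello-group-2")
    public void listen(String payload) {
        // parse the JSON string produced by ProducerService back into a Message
        Message message = JSON.parseObject(payload, Message.class);
        System.out.println(message.getTaskId() + " -> " + message.getMsg());
    }
}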

4. Write a test controller

package com.isch.pgkafkaproducer.controller;

import com.isch.pgkafkaproducer.Service.ProducerService;
import org.springframework.beans.factory.annotation.Autowired;
import org.springframework.web.bind.annotation.RequestMapping;
import org.springframework.web.bind.annotation.ResponseBody;
import org.springframework.web.bind.annotation.RestController;

/**
 * @ClassName: KafkaTestController
 * @Description Test controller
 * @Author: Hanjiaxv
 * @Date: 2019/11/18 14:40
 * @Version: V1.0
 **/
@RestController
public class KafkaTestController {

    @Autowired
    private ProducerService producer;

    @RequestMapping("/testSendMsg")
    @ResponseBody
    public String testSendMsg() {
        producer.send();
        return "success";
    }
}

Finally, visit http://localhost:8082/testSendMsg to trigger the producer; the ConsumerService output in the console confirms that the five messages were received and deserialized.