Java Learning, Day 17 - Message Queues and Asynchronous Processing


Study time: 4-5 hours
Goals: master message queue usage in Spring, integrate RabbitMQ and Kafka, understand the AMQP and MQTT protocols, and implement asynchronous processing patterns


Detailed Study Checklist


✅ Part 1: Message Queue Fundamentals and Spring Support (60 minutes)

1. Core Message Queue Concepts

The role and advantages of message queues

// MessageQueueBasics.java
package com.example.demo.mq;

import java.util.HashMap;
import java.util.Map;

public class MessageQueueBasics {
    
    public static class QueueAdvantage {
        private String name;
        private String description;
        private String benefit;
        private String springSupport;
        
        public QueueAdvantage(String name, String description, String benefit, String springSupport) {
            this.name = name;
            this.description = description;
            this.benefit = benefit;
            this.springSupport = springSupport;
        }
        
        // Getter方法
        public String getName() { return name; }
        public String getDescription() { return description; }
        public String getBenefit() { return benefit; }
        public String getSpringSupport() { return springSupport; }
    }
    
    public static void main(String[] args) {
        Map<String, QueueAdvantage> advantages = new HashMap<>();
        
        // 异步处理
        advantages.put("async", new QueueAdvantage(
            "异步处理",
            "发送方不需要等待接收方处理完成",
            "提高系统响应速度,改善用户体验",
            "@Async注解 + 线程池配置"
        ));
        
        // 服务解耦
        advantages.put("decoupling", new QueueAdvantage(
            "服务解耦",
            "服务间通过消息通信,不直接依赖",
            "降低系统耦合度,便于独立开发和部署",
            "Spring AMQP + Spring Kafka"
        ));
        
        // 流量削峰
        advantages.put("traffic", new QueueAdvantage(
            "流量削峰",
            "突发流量通过队列缓冲,平滑处理",
            "保护系统稳定性,避免过载崩溃",
            "消息持久化 + 消费者限流"
        ));
        
        // 可靠性保证
        advantages.put("reliability", new QueueAdvantage(
            "可靠性保证",
            "消息持久化,确保不丢失",
            "提高系统可靠性,保证数据完整性",
            "消息确认机制 + 死信队列"
        ));
        
        System.out.println("=== Spring消息队列主要优势 ===");
        for (Map.Entry<String, QueueAdvantage> entry : advantages.entrySet()) {
            QueueAdvantage advantage = entry.getValue();
            System.out.println("\n优势: " + advantage.getName());
            System.out.println("描述: " + advantage.getDescription());
            System.out.println("好处: " + advantage.getBenefit());
            System.out.println("Spring支持: " + advantage.getSpringSupport());
        }
    }
}

2. Spring Messaging Support

Spring's message queue abstraction layer

// SpringMessageQueueSupport.java
package com.example.demo.mq;

import org.springframework.amqp.core.Message;
import org.springframework.amqp.rabbit.core.RabbitTemplate;
import org.springframework.kafka.core.KafkaTemplate;
import org.springframework.kafka.support.SendResult;
import org.springframework.util.concurrent.ListenableFuture;
import org.springframework.util.concurrent.ListenableFutureCallback;

public class SpringMessageQueueSupport {
    
    /**
     * Spring AMQP support (RabbitMQ)
     */
    public static class SpringAMQPSupport {
        private RabbitTemplate rabbitTemplate;
        
        public void sendMessage(String exchange, String routingKey, Object message) {
            // Send a message to RabbitMQ via the given exchange and routing key
            rabbitTemplate.convertAndSend(exchange, routingKey, message);
        }
        
        public void sendPersistentMessage(String exchange, String routingKey, Object message) {
            // A MessagePostProcessor adjusts message properties before the message is sent,
            // here marking the message as persistent
            rabbitTemplate.convertAndSend(exchange, routingKey, message, new org.springframework.amqp.core.MessagePostProcessor() {
                @Override
                public Message postProcessMessage(Message msg) {
                    msg.getMessageProperties().setDeliveryMode(org.springframework.amqp.core.MessageDeliveryMode.PERSISTENT);
                    return msg;
                }
            });
        }
    }
    
    /**
     * Spring Kafka support
     * (ListenableFuture applies to spring-kafka 2.x / Spring Boot 2.x;
     * in spring-kafka 3.x, KafkaTemplate.send returns a CompletableFuture instead)
     */
    public static class SpringKafkaSupport {
        private KafkaTemplate<String, Object> kafkaTemplate;
        
        public void sendMessage(String topic, Object message) {
            // Send a message to a Kafka topic
            kafkaTemplate.send(topic, message);
        }
        
        public void sendMessageWithCallback(String topic, Object message) {
            // Send and register a callback that fires once the broker acknowledges the record
            ListenableFuture<SendResult<String, Object>> future = kafkaTemplate.send(topic, message);
            
            future.addCallback(new ListenableFutureCallback<SendResult<String, Object>>() {
                @Override
                public void onSuccess(SendResult<String, Object> result) {
                    System.out.println("Message sent: " + result.getRecordMetadata());
                }
                
                @Override
                public void onFailure(Throwable ex) {
                    System.err.println("Message send failed: " + ex.getMessage());
                }
            });
        }
    }
}

✅ Part 2: RabbitMQ and the AMQP Protocol (90 minutes)

1. AMQP Protocol Basics

AMQP protocol characteristics

// AMQPProtocolBasics.java
package com.example.demo.mq.amqp;

import java.util.HashMap;
import java.util.Map;

public class AMQPProtocolBasics {
    
    public static class AMQPFeature {
        private String name;
        private String description;
        private String benefit;
        private String implementation;
        
        public AMQPFeature(String name, String description, String benefit, String implementation) {
            this.name = name;
            this.description = description;
            this.benefit = benefit;
            this.implementation = implementation;
        }
        
        // Getter方法
        public String getName() { return name; }
        public String getDescription() { return description; }
        public String getBenefit() { return benefit; }
        public String getImplementation() { return implementation; }
    }
    
    public static void main(String[] args) {
        Map<String, AMQPFeature> features = new HashMap<>();
        
        // 消息确认
        features.put("ack", new AMQPFeature(
            "消息确认机制",
            "消费者处理完消息后发送确认,确保消息不丢失",
            "保证消息可靠传递,支持事务回滚",
            "channel.basicAck() + 手动确认模式"
        ));
        
        // 消息路由
        features.put("routing", new AMQPFeature(
            "消息路由",
            "根据路由键将消息分发到不同队列",
            "灵活的消息分发策略,支持复杂业务逻辑",
            "DirectExchange + TopicExchange + FanoutExchange"
        ));
        
        // 消息持久化
        features.put("persistence", new AMQPFeature(
            "消息持久化",
            "消息存储在磁盘,服务重启不丢失",
            "提高系统可靠性,保证数据完整性",
            "MessageDeliveryMode.PERSISTENT"
        ));
        
        // 死信队列
        features.put("dlq", new AMQPFeature(
            "死信队列",
            "处理无法正常消费的消息",
            "问题排查和消息恢复,提高系统健壮性",
            "x-dead-letter-exchange + x-dead-letter-routing-key"
        ));
        
        System.out.println("=== AMQP协议核心特性 ===");
        for (Map.Entry<String, AMQPFeature> entry : features.entrySet()) {
            AMQPFeature feature = entry.getValue();
            System.out.println("\n特性: " + feature.getName());
            System.out.println("描述: " + feature.getDescription());
            System.out.println("好处: " + feature.getBenefit());
            System.out.println("实现: " + feature.getImplementation());
        }
    }
}

2. RabbitMQ Configuration and Integration

Maven dependencies

<!-- pom.xml -->
<dependencies>
    <!-- Spring Boot Web Starter -->
    <dependency>
        <groupId>org.springframework.boot</groupId>
        <artifactId>spring-boot-starter-web</artifactId>
    </dependency>
    
    <!-- Spring AMQP (RabbitMQ) -->
    <dependency>
        <groupId>org.springframework.boot</groupId>
        <artifactId>spring-boot-starter-amqp</artifactId>
    </dependency>
    
    <!-- Spring Kafka -->
    <dependency>
        <groupId>org.springframework.kafka</groupId>
        <artifactId>spring-kafka</artifactId>
    </dependency>
    
    <!-- Eclipse Paho MQTT client (needed by the MQTT examples in Part 5) -->
    <dependency>
        <groupId>org.eclipse.paho</groupId>
        <artifactId>org.eclipse.paho.client.mqttv3</artifactId>
        <version>1.2.5</version>
    </dependency>
    
    <!-- JSON handling -->
    <dependency>
        <groupId>com.fasterxml.jackson.core</groupId>
        <artifactId>jackson-databind</artifactId>
    </dependency>
    
    <!-- Lombok -->
    <dependency>
        <groupId>org.projectlombok</groupId>
        <artifactId>lombok</artifactId>
        <optional>true</optional>
    </dependency>
</dependencies>

RabbitMQ configuration class

// RabbitMQConfig.java
package com.example.demo.config;

import org.springframework.amqp.core.*;
import org.springframework.amqp.rabbit.connection.ConnectionFactory;
import org.springframework.amqp.rabbit.core.RabbitTemplate;
import org.springframework.amqp.support.converter.Jackson2JsonMessageConverter;
import org.springframework.amqp.support.converter.MessageConverter;
import org.springframework.beans.factory.annotation.Value;
import org.springframework.context.annotation.Bean;
import org.springframework.context.annotation.Configuration;

@Configuration
public class RabbitMQConfig {

    @Value("${spring.rabbitmq.host:localhost}")
    private String host;
    
    @Value("${spring.rabbitmq.port:5672}")
    private int port;
    
    @Value("${spring.rabbitmq.username:guest}")
    private String username;
    
    @Value("${spring.rabbitmq.password:guest}")
    private String password;

    /**
     * 消息转换器
     */
    @Bean
    public MessageConverter jsonMessageConverter() {
        return new Jackson2JsonMessageConverter();
    }

    /**
     * RabbitTemplate configuration
     */
    @Bean
    public RabbitTemplate rabbitTemplate(ConnectionFactory connectionFactory) {
        RabbitTemplate template = new RabbitTemplate(connectionFactory);
        template.setMessageConverter(jsonMessageConverter());
        
        // Required for the returns callback: unroutable messages are returned to the producer
        template.setMandatory(true);
        
        // Publish-confirm callback (only fires when publisher confirms are enabled on the connection factory)
        template.setConfirmCallback((correlationData, ack, cause) -> {
            if (ack) {
                System.out.println("Message confirmed by broker: " + correlationData);
            } else {
                System.err.println("Message not confirmed: " + cause);
            }
        });
        
        // Return callback for unroutable messages (requires publisher returns to be enabled)
        template.setReturnsCallback(returned -> {
            System.err.println("Message returned: " + returned);
        });
        
        return template;
    }

    /**
     * 订单交换机
     */
    @Bean
    public DirectExchange orderExchange() {
        return new DirectExchange("order.exchange");
    }

    /**
     * 通知交换机
     */
    @Bean
    public FanoutExchange notificationExchange() {
        return new FanoutExchange("notification.exchange");
    }

    /**
     * 死信交换机
     */
    @Bean
    public DirectExchange deadLetterExchange() {
        return new DirectExchange("dead.letter.exchange");
    }

    /**
     * 订单创建队列
     */
    @Bean
    public Queue orderCreatedQueue() {
        return QueueBuilder.durable("order.created.queue")
                .withArgument("x-dead-letter-exchange", "dead.letter.exchange")
                .withArgument("x-dead-letter-routing-key", "dead.letter")
                .withArgument("x-message-ttl", 300000) // 5分钟过期
                .build();
    }

    /**
     * 订单支付队列
     */
    @Bean
    public Queue orderPaidQueue() {
        return QueueBuilder.durable("order.paid.queue")
                .withArgument("x-dead-letter-exchange", "dead.letter.exchange")
                .withArgument("x-dead-letter-routing-key", "dead.letter")
                .build();
    }

    /**
     * 通知队列
     */
    @Bean
    public Queue notificationQueue() {
        return QueueBuilder.durable("notification.queue")
                .withArgument("x-dead-letter-exchange", "dead.letter.exchange")
                .withArgument("x-dead-letter-routing-key", "dead.letter")
                .build();
    }

    /**
     * 死信队列
     */
    @Bean
    public Queue deadLetterQueue() {
        return QueueBuilder.durable("dead.letter.queue").build();
    }

    /**
     * 绑定订单创建队列到交换机
     */
    @Bean
    public Binding orderCreatedBinding() {
        return BindingBuilder.bind(orderCreatedQueue())
                .to(orderExchange())
                .with("order.created");
    }

    /**
     * 绑定订单支付队列到交换机
     */
    @Bean
    public Binding orderPaidBinding() {
        return BindingBuilder.bind(orderPaidQueue())
                .to(orderExchange())
                .with("order.paid");
    }

    /**
     * 绑定通知队列到交换机
     */
    @Bean
    public Binding notificationBinding() {
        return BindingBuilder.bind(notificationQueue())
                .to(notificationExchange());
    }

    /**
     * 绑定死信队列到交换机
     */
    @Bean
    public Binding deadLetterBinding() {
        return BindingBuilder.bind(deadLetterQueue())
                .to(deadLetterExchange())
                .with("dead.letter");
    }
}
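
The confirm and return callbacks configured above only fire when publisher confirms and publisher returns are enabled on the connection factory (Spring Boot can also enable them through spring.rabbitmq.publisher-confirm-type=correlated and spring.rabbitmq.publisher-returns=true). As a hedged sketch, a bean like the following could be added to RabbitMQConfig, reusing the @Value fields it already declares; overriding the auto-configured factory this way is an assumption, not part of the original notes:

    /**
     * Connection factory with publisher confirms/returns enabled and channel caching tuned (sketch).
     */
    @Bean
    public ConnectionFactory connectionFactory() {
        org.springframework.amqp.rabbit.connection.CachingConnectionFactory factory =
                new org.springframework.amqp.rabbit.connection.CachingConnectionFactory(host, port);
        factory.setUsername(username);
        factory.setPassword(password);
        // Correlated confirms drive RabbitTemplate's ConfirmCallback
        factory.setPublisherConfirmType(
                org.springframework.amqp.rabbit.connection.CachingConnectionFactory.ConfirmType.CORRELATED);
        // Returned (unroutable) messages drive the ReturnsCallback, together with setMandatory(true)
        factory.setPublisherReturns(true);
        // Channel caching avoids opening a new channel per publish (size is an arbitrary example)
        factory.setChannelCacheSize(25);
        return factory;
    }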

3. RabbitMQ Message Producer

Order message producer

// OrderMessageProducer.java
package com.example.demo.producer;

import com.example.demo.model.OrderMessage;
import lombok.extern.slf4j.Slf4j;
import org.springframework.amqp.rabbit.core.RabbitTemplate;
import org.springframework.beans.factory.annotation.Autowired;
import org.springframework.stereotype.Service;

import java.util.List;

@Slf4j
@Service
public class OrderMessageProducer {

    @Autowired
    private RabbitTemplate rabbitTemplate;

    /**
     * 发送订单创建消息
     */
    public void sendOrderCreated(OrderMessage orderMessage) {
        try {
            String exchange = "order.exchange";
            String routingKey = "order.created";
            rabbitTemplate.convertAndSend(exchange, routingKey, orderMessage);
            log.info("订单创建消息发送成功: {}", orderMessage.getOrderNumber());
        } catch (Exception e) {
            log.error("订单创建消息发送失败: {}", orderMessage.getOrderNumber(), e);
            throw new RuntimeException("消息发送失败", e);
        }
    }

    /**
     * 发送订单支付消息
     */
    public void sendOrderPaid(OrderMessage orderMessage) {
        try {
            String exchange = "order.exchange";
            String routingKey = "order.paid";
            rabbitTemplate.convertAndSend(exchange, routingKey, orderMessage);
            log.info("订单支付消息发送成功: {}", orderMessage.getOrderNumber());
        } catch (Exception e) {
            log.error("订单支付消息发送失败: {}", orderMessage.getOrderNumber(), e);
            throw new RuntimeException("消息发送失败", e);
        }
    }

    /**
     * Send a delayed message.
     * Note: per-message TTL only produces a "delayed" effect when the message sits in a
     * queue whose dead-letter exchange routes it onward after expiry. RabbitMQConfig does
     * not bind any queue to the "order.delayed" routing key, so such a binding (or the
     * rabbitmq_delayed_message_exchange plugin) is assumed here.
     */
    public void sendDelayedMessage(OrderMessage orderMessage, long delayMillis) {
        try {
            String exchange = "order.exchange";
            String routingKey = "order.delayed";
            rabbitTemplate.convertAndSend(exchange, routingKey, orderMessage, message -> {
                message.getMessageProperties().setExpiration(String.valueOf(delayMillis));
                return message;
            });
            log.info("Delayed message sent: {}, delay: {}ms", orderMessage.getOrderNumber(), delayMillis);
        } catch (Exception e) {
            log.error("Failed to send delayed message: {}", orderMessage.getOrderNumber(), e);
            throw new RuntimeException("Failed to send delayed message", e);
        }
    }

    /**
     * Send a batch of messages (a queue bound to the "order.batch" routing key is assumed).
     */
    public void sendBatchMessages(List<OrderMessage> messages) {
        try {
            String exchange = "order.exchange";
            String routingKey = "order.batch";
            for (OrderMessage message : messages) {
                rabbitTemplate.convertAndSend(exchange, routingKey, message);
            }
            log.info("Batch of {} messages sent", messages.size());
        } catch (Exception e) {
            log.error("Failed to send message batch", e);
            throw new RuntimeException("Failed to send message batch", e);
        }
    }
}
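
The OrderMessage payload used throughout these examples is not shown in the notes. A minimal sketch, with field names inferred from how the batch test controller constructs it later (orderNumber, userId, productId, quantity, amount, status are assumptions), might look like this:

// OrderMessage.java (sketch)
package com.example.demo.model;

import lombok.Data;
import lombok.NoArgsConstructor;

import java.io.Serializable;
import java.math.BigDecimal;
import java.time.LocalDateTime;

@Data
@NoArgsConstructor
public class OrderMessage implements Serializable {

    private String orderNumber;   // business order number
    private Long userId;          // user placing the order
    private Long productId;       // ordered product
    private Integer quantity;     // ordered quantity
    private BigDecimal amount;    // order amount
    private String status;        // e.g. CREATED / PAID / CANCELLED
    private LocalDateTime createdAt = LocalDateTime.now();

    public OrderMessage(String orderNumber, Long userId, Long productId,
                        Integer quantity, BigDecimal amount, String status) {
        this.orderNumber = orderNumber;
        this.userId = userId;
        this.productId = productId;
        this.quantity = quantity;
        this.amount = amount;
        this.status = status;
    }
}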

4. RabbitMQ Message Consumer

Order message consumer

// OrderMessageConsumer.java
package com.example.demo.consumer;

import com.example.demo.model.OrderMessage;
import com.example.demo.service.OrderService;
import lombok.extern.slf4j.Slf4j;
import org.springframework.amqp.core.Message;
import org.springframework.amqp.rabbit.annotation.RabbitListener;
import org.springframework.amqp.support.AmqpHeaders;
import org.springframework.beans.factory.annotation.Autowired;
import org.springframework.messaging.handler.annotation.Header;
import org.springframework.stereotype.Service;

import com.rabbitmq.client.Channel;
import java.io.IOException;

@Slf4j
@Service
public class OrderMessageConsumer {

    @Autowired
    private OrderService orderService;

    /**
     * Consume order-created messages.
     * Note: manual basicAck/basicNack requires the listener container factory to use
     * AcknowledgeMode.MANUAL (configured in the performance-tuning section, or via
     * spring.rabbitmq.listener.simple.acknowledge-mode=manual).
     */
    @RabbitListener(queues = "order.created.queue")
    public void consumeOrderCreated(OrderMessage orderMessage, 
                                  Message message, 
                                  Channel channel,
                                  @Header(AmqpHeaders.DELIVERY_TAG) long deliveryTag) {
        try {
            log.info("Received order-created message: {}", orderMessage.getOrderNumber());
            
            // Handle order-created business logic
            orderService.processOrderCreated(orderMessage);
            
            // Acknowledge the message manually
            channel.basicAck(deliveryTag, false);
            log.info("Order-created message processed: {}", orderMessage.getOrderNumber());
            
        } catch (Exception e) {
            log.error("Failed to process order-created message: {}", orderMessage.getOrderNumber(), e);
            
            try {
                // Reject without requeueing so the message is routed to the configured
                // dead-letter queue; requeueing (true) would risk an endless redelivery loop
                channel.basicNack(deliveryTag, false, false);
            } catch (IOException ioException) {
                log.error("Failed to nack message", ioException);
            }
        }
    }

    /**
     * Consume order-paid messages.
     */
    @RabbitListener(queues = "order.paid.queue")
    public void consumeOrderPaid(OrderMessage orderMessage, 
                                Message message, 
                                Channel channel,
                                @Header(AmqpHeaders.DELIVERY_TAG) long deliveryTag) {
        try {
            log.info("Received order-paid message: {}", orderMessage.getOrderNumber());
            
            // Handle order-paid business logic
            orderService.processOrderPaid(orderMessage);
            
            // Acknowledge the message manually
            channel.basicAck(deliveryTag, false);
            log.info("Order-paid message processed: {}", orderMessage.getOrderNumber());
            
        } catch (Exception e) {
            log.error("Failed to process order-paid message: {}", orderMessage.getOrderNumber(), e);
            
            try {
                // Reject without requeueing so the message goes to the dead-letter queue
                channel.basicNack(deliveryTag, false, false);
            } catch (IOException ioException) {
                log.error("Failed to nack message", ioException);
            }
        }
    }
}

✅ Part 3: Kafka Messaging (90 minutes)

1. Kafka Configuration and Integration

Kafka configuration class

// KafkaConfig.java
package com.example.demo.config;

import org.apache.kafka.clients.consumer.ConsumerConfig;
import org.apache.kafka.clients.producer.ProducerConfig;
import org.apache.kafka.common.serialization.StringDeserializer;
import org.apache.kafka.common.serialization.StringSerializer;
import org.springframework.beans.factory.annotation.Value;
import org.springframework.context.annotation.Bean;
import org.springframework.context.annotation.Configuration;
import org.springframework.kafka.annotation.EnableKafka;
import org.springframework.kafka.config.ConcurrentKafkaListenerContainerFactory;
import org.springframework.kafka.core.*;
import org.springframework.kafka.listener.ContainerProperties;
import org.springframework.kafka.support.serializer.JsonDeserializer;
import org.springframework.kafka.support.serializer.JsonSerializer;

import java.util.HashMap;
import java.util.Map;

@Configuration
@EnableKafka
public class KafkaConfig {

    @Value("${spring.kafka.bootstrap-servers:localhost:9092}")
    private String bootstrapServers;

    @Value("${spring.kafka.consumer.group-id:default-group}")
    private String groupId;

    /**
     * Kafka producer configuration
     */
    @Bean
    public ProducerFactory<String, Object> producerFactory() {
        Map<String, Object> configProps = new HashMap<>();
        configProps.put(ProducerConfig.BOOTSTRAP_SERVERS_CONFIG, bootstrapServers);
        configProps.put(ProducerConfig.KEY_SERIALIZER_CLASS_CONFIG, StringSerializer.class);
        // Values are serialized as JSON so that OrderMessage objects can be sent
        configProps.put(ProducerConfig.VALUE_SERIALIZER_CLASS_CONFIG, JsonSerializer.class);
        
        // Producer tuning
        configProps.put(ProducerConfig.ACKS_CONFIG, "all");
        configProps.put(ProducerConfig.RETRIES_CONFIG, 3);
        configProps.put(ProducerConfig.BATCH_SIZE_CONFIG, 16384);
        configProps.put(ProducerConfig.LINGER_MS_CONFIG, 1);
        configProps.put(ProducerConfig.BUFFER_MEMORY_CONFIG, 33554432);
        
        return new DefaultKafkaProducerFactory<>(configProps);
    }

    /**
     * KafkaTemplate
     */
    @Bean
    public KafkaTemplate<String, Object> kafkaTemplate() {
        return new KafkaTemplate<>(producerFactory());
    }

    /**
     * Kafka consumer configuration
     */
    @Bean
    public ConsumerFactory<String, Object> consumerFactory() {
        Map<String, Object> configProps = new HashMap<>();
        configProps.put(ConsumerConfig.BOOTSTRAP_SERVERS_CONFIG, bootstrapServers);
        configProps.put(ConsumerConfig.GROUP_ID_CONFIG, groupId);
        configProps.put(ConsumerConfig.KEY_DESERIALIZER_CLASS_CONFIG, StringDeserializer.class);
        // Values are deserialized from JSON so @KafkaListener methods can receive OrderMessage payloads
        configProps.put(ConsumerConfig.VALUE_DESERIALIZER_CLASS_CONFIG, JsonDeserializer.class);
        configProps.put(JsonDeserializer.TRUSTED_PACKAGES, "com.example.demo.model");
        
        // Consumer tuning
        configProps.put(ConsumerConfig.AUTO_OFFSET_RESET_CONFIG, "earliest");
        configProps.put(ConsumerConfig.ENABLE_AUTO_COMMIT_CONFIG, false);
        configProps.put(ConsumerConfig.MAX_POLL_RECORDS_CONFIG, 500);
        configProps.put(ConsumerConfig.SESSION_TIMEOUT_MS_CONFIG, 30000);
        configProps.put(ConsumerConfig.HEARTBEAT_INTERVAL_MS_CONFIG, 10000);
        
        return new DefaultKafkaConsumerFactory<>(configProps);
    }

    /**
     * Kafka监听器容器工厂
     */
    @Bean
    public ConcurrentKafkaListenerContainerFactory<String, Object> kafkaListenerContainerFactory() {
        ConcurrentKafkaListenerContainerFactory<String, Object> factory = 
            new ConcurrentKafkaListenerContainerFactory<>();
        factory.setConsumerFactory(consumerFactory());
        
        // 设置并发数
        factory.setConcurrency(3);
        
        // 设置手动提交
        factory.getContainerProperties().setAckMode(ContainerProperties.AckMode.MANUAL_IMMEDIATE);
        
        return factory;
    }
}

2. Kafka Message Producer

Kafka message producer

// KafkaMessageProducer.java
package com.example.demo.producer;

import com.example.demo.model.OrderMessage;
import lombok.extern.slf4j.Slf4j;
import org.springframework.beans.factory.annotation.Autowired;
import org.springframework.kafka.core.KafkaTemplate;
import org.springframework.kafka.support.SendResult;
import org.springframework.stereotype.Service;
import org.springframework.util.concurrent.ListenableFuture;
import org.springframework.util.concurrent.ListenableFutureCallback;

import java.util.List;

@Slf4j
@Service
public class KafkaMessageProducer {

    @Autowired
    private KafkaTemplate<String, Object> kafkaTemplate;

    /**
     * 发送订单创建消息
     */
    public void sendOrderCreated(OrderMessage orderMessage) {
        try {
            String topic = "order-created";
            ListenableFuture<SendResult<String, Object>> future = 
                kafkaTemplate.send(topic, orderMessage.getOrderNumber(), orderMessage);
            
            future.addCallback(new ListenableFutureCallback<SendResult<String, Object>>() {
                @Override
                public void onSuccess(SendResult<String, Object> result) {
                    log.info("订单创建消息发送成功: {}, offset: {}", 
                            orderMessage.getOrderNumber(), 
                            result.getRecordMetadata().offset());
                }
                
                @Override
                public void onFailure(Throwable ex) {
                    log.error("订单创建消息发送失败: {}", orderMessage.getOrderNumber(), ex);
                }
            });
            
        } catch (Exception e) {
            log.error("订单创建消息发送异常: {}", orderMessage.getOrderNumber(), e);
            throw new RuntimeException("消息发送失败", e);
        }
    }

    /**
     * 发送订单支付消息
     */
    public void sendOrderPaid(OrderMessage orderMessage) {
        try {
            String topic = "order-paid";
            ListenableFuture<SendResult<String, Object>> future = 
                kafkaTemplate.send(topic, orderMessage.getOrderNumber(), orderMessage);
            
            future.addCallback(new ListenableFutureCallback<SendResult<String, Object>>() {
                @Override
                public void onSuccess(SendResult<String, Object> result) {
                    log.info("订单支付消息发送成功: {}, offset: {}", 
                            orderMessage.getOrderNumber(), 
                            result.getRecordMetadata().offset());
                }
                
                @Override
                public void onFailure(Throwable ex) {
                    log.error("订单支付消息发送失败: {}", orderMessage.getOrderNumber(), ex);
                }
            });
            
        } catch (Exception e) {
            log.error("订单支付消息发送异常: {}", orderMessage.getOrderNumber(), e);
            throw new RuntimeException("消息发送失败", e);
        }
    }

    /**
     * 发送批量消息
     */
    public void sendBatchMessages(List<OrderMessage> messages) {
        try {
            String topic = "order-batch";
            for (OrderMessage message : messages) {
                kafkaTemplate.send(topic, message.getOrderNumber(), message);
            }
            log.info("批量消息发送成功,数量: {}", messages.size());
        } catch (Exception e) {
            log.error("批量消息发送失败", e);
            throw new RuntimeException("批量消息发送失败", e);
        }
    }

    /**
     * 发送带分区键的消息
     */
    public void sendMessageWithPartition(OrderMessage orderMessage, int partition) {
        try {
            String topic = "order-partitioned";
            kafkaTemplate.send(topic, partition, orderMessage.getOrderNumber(), orderMessage);
            log.info("分区消息发送成功: {}, 分区: {}", orderMessage.getOrderNumber(), partition);
        } catch (Exception e) {
            log.error("分区消息发送失败: {}", orderMessage.getOrderNumber(), e);
            throw new RuntimeException("分区消息发送失败", e);
        }
    }
}

3. Kafka Message Consumer

Kafka message consumer

// KafkaMessageConsumer.java
package com.example.demo.consumer;

import com.example.demo.model.OrderMessage;
import com.example.demo.service.OrderService;
import lombok.extern.slf4j.Slf4j;
import org.springframework.beans.factory.annotation.Autowired;
import org.springframework.kafka.annotation.KafkaListener;
import org.springframework.kafka.support.Acknowledgment;
import org.springframework.kafka.support.KafkaHeaders;
import org.springframework.messaging.handler.annotation.Header;
import org.springframework.messaging.handler.annotation.Payload;
import org.springframework.stereotype.Service;

import java.util.List;

@Slf4j
@Service
public class KafkaMessageConsumer {

    @Autowired
    private OrderService orderService;

    /**
     * 消费订单创建消息
     */
    @KafkaListener(topics = "order-created", groupId = "order-group")
    public void consumeOrderCreated(@Payload OrderMessage orderMessage,
                                  @Header(KafkaHeaders.RECEIVED_TOPIC) String topic,
                                  @Header(KafkaHeaders.RECEIVED_PARTITION_ID) int partition,
                                  @Header(KafkaHeaders.OFFSET) long offset,
                                  Acknowledgment acknowledgment) {
        try {
            log.info("收到订单创建消息: topic={}, partition={}, offset={}, orderNumber={}", 
                    topic, partition, offset, orderMessage.getOrderNumber());
            
            // 处理订单创建逻辑
            orderService.processOrderCreated(orderMessage);
            
            // 手动提交偏移量
            acknowledgment.acknowledge();
            log.info("订单创建消息处理成功: {}", orderMessage.getOrderNumber());
            
        } catch (Exception e) {
            log.error("订单创建消息处理失败: {}", orderMessage.getOrderNumber(), e);
            // 不提交偏移量,消息会重新消费
        }
    }

    /**
     * 消费订单支付消息
     */
    @KafkaListener(topics = "order-paid", groupId = "order-group")
    public void consumeOrderPaid(@Payload OrderMessage orderMessage,
                                @Header(KafkaHeaders.RECEIVED_TOPIC) String topic,
                                @Header(KafkaHeaders.RECEIVED_PARTITION_ID) int partition,
                                @Header(KafkaHeaders.OFFSET) long offset,
                                Acknowledgment acknowledgment) {
        try {
            log.info("收到订单支付消息: topic={}, partition={}, offset={}, orderNumber={}", 
                    topic, partition, offset, orderMessage.getOrderNumber());
            
            // 处理订单支付逻辑
            orderService.processOrderPaid(orderMessage);
            
            // 手动提交偏移量
            acknowledgment.acknowledge();
            log.info("订单支付消息处理成功: {}", orderMessage.getOrderNumber());
            
        } catch (Exception e) {
            log.error("订单支付消息处理失败: {}", orderMessage.getOrderNumber(), e);
            // 不提交偏移量,消息会重新消费
        }
    }

    /**
     * Consume order messages in batches.
     * Note: batch consumption requires a batch-enabled listener container factory
     * (factory.setBatchListener(true), as shown in the performance-tuning section);
     * with a batch listener, per-record headers such as partition and offset arrive
     * as lists, so they are omitted here.
     */
    @KafkaListener(topics = "order-batch", groupId = "batch-group")
    public void consumeBatchMessages(@Payload List<OrderMessage> messages,
                                     Acknowledgment acknowledgment) {
        try {
            log.info("Received message batch, count: {}", messages.size());
            
            // Process each message in the batch
            for (OrderMessage message : messages) {
                orderService.processOrderCreated(message);
            }
            
            // Commit offsets for the whole batch manually
            acknowledgment.acknowledge();
            log.info("Batch processed successfully, count: {}", messages.size());
            
        } catch (Exception e) {
            log.error("Batch processing failed", e);
            // Offsets are not committed, so the batch will be redelivered
        }
    }
}

✅ Part 4: Spring Asynchronous Processing and Task Scheduling (60 minutes)

1. Async Task Configuration

Async configuration class

// AsyncConfig.java
package com.example.demo.config;

import org.springframework.context.annotation.Bean;
import org.springframework.context.annotation.Configuration;
import org.springframework.scheduling.annotation.EnableAsync;
import org.springframework.scheduling.annotation.EnableScheduling;
import org.springframework.scheduling.concurrent.ThreadPoolTaskExecutor;

import java.util.concurrent.Executor;
import java.util.concurrent.ThreadPoolExecutor;

@Configuration
@EnableAsync
@EnableScheduling
public class AsyncConfig {

    /**
     * 异步任务执行器
     */
    @Bean("taskExecutor")
    public Executor taskExecutor() {
        ThreadPoolTaskExecutor executor = new ThreadPoolTaskExecutor();
        
        // 核心线程数
        executor.setCorePoolSize(10);
        
        // 最大线程数
        executor.setMaxPoolSize(20);
        
        // 队列容量
        executor.setQueueCapacity(500);
        
        // 线程空闲时间
        executor.setKeepAliveSeconds(60);
        
        // 线程名前缀
        executor.setThreadNamePrefix("AsyncTask-");
        
        // 拒绝策略
        executor.setRejectedExecutionHandler(new ThreadPoolExecutor.CallerRunsPolicy());
        
        // 等待所有任务结束后再关闭线程池
        executor.setWaitForTasksToCompleteOnShutdown(true);
        
        // 等待时间
        executor.setAwaitTerminationSeconds(60);
        
        executor.initialize();
        return executor;
    }

    /**
     * 消息处理执行器
     */
    @Bean("messageExecutor")
    public Executor messageExecutor() {
        ThreadPoolTaskExecutor executor = new ThreadPoolTaskExecutor();
        
        executor.setCorePoolSize(5);
        executor.setMaxPoolSize(10);
        executor.setQueueCapacity(200);
        executor.setKeepAliveSeconds(60);
        executor.setThreadNamePrefix("MessageTask-");
        executor.setRejectedExecutionHandler(new ThreadPoolExecutor.CallerRunsPolicy());
        executor.setWaitForTasksToCompleteOnShutdown(true);
        executor.setAwaitTerminationSeconds(60);
        
        executor.initialize();
        return executor;
    }
}
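
One thing @EnableAsync does not cover on its own: exceptions thrown from @Async methods that return void are never surfaced to the caller. A hedged sketch of how an AsyncUncaughtExceptionHandler could be registered (the class name and log format are assumptions):

// GlobalAsyncExceptionConfig.java (sketch)
package com.example.demo.config;

import org.springframework.aop.interceptor.AsyncUncaughtExceptionHandler;
import org.springframework.context.annotation.Configuration;
import org.springframework.scheduling.annotation.AsyncConfigurer;

@Configuration
public class GlobalAsyncExceptionConfig implements AsyncConfigurer {

    /**
     * Called for exceptions thrown from void @Async methods,
     * which would otherwise be silently dropped.
     */
    @Override
    public AsyncUncaughtExceptionHandler getAsyncUncaughtExceptionHandler() {
        return (ex, method, params) ->
                System.err.println("Async method " + method.getName() + " failed: " + ex.getMessage());
    }
}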

2. Async Task Services

Async order processing service

// AsyncOrderService.java
package com.example.demo.service;

import com.example.demo.model.OrderMessage;
import com.example.demo.producer.OrderMessageProducer;
import com.example.demo.producer.KafkaMessageProducer;
import lombok.extern.slf4j.Slf4j;
import org.springframework.beans.factory.annotation.Autowired;
import org.springframework.scheduling.annotation.Async;
import org.springframework.stereotype.Service;

import java.util.List;
import java.util.concurrent.CompletableFuture;

@Slf4j
@Service
public class AsyncOrderService {

    @Autowired
    private OrderMessageProducer orderMessageProducer;
    
    @Autowired
    private KafkaMessageProducer kafkaMessageProducer;
    
    @Autowired
    private NotificationService notificationService;

    /**
     * 异步处理订单创建
     */
    @Async("taskExecutor")
    public CompletableFuture<String> processOrderAsync(OrderMessage orderMessage) {
        try {
            log.info("开始异步处理订单: {}", orderMessage.getOrderNumber());
            
            // 模拟处理时间
            Thread.sleep(2000);
            
            // 发送订单创建消息到RabbitMQ
            orderMessageProducer.sendOrderCreated(orderMessage);
            
            // 发送订单创建消息到Kafka
            kafkaMessageProducer.sendOrderCreated(orderMessage);
            
            // 发送通知
            notificationService.sendOrderCreatedNotification(orderMessage);
            
            log.info("订单异步处理完成: {}", orderMessage.getOrderNumber());
            
            return CompletableFuture.completedFuture("SUCCESS");
            
        } catch (Exception e) {
            log.error("订单异步处理失败: {}", orderMessage.getOrderNumber(), e);
            return CompletableFuture.completedFuture("FAILED");
        }
    }

    /**
     * 异步批量处理订单
     */
    @Async("taskExecutor")
    public CompletableFuture<Integer> processBatchOrdersAsync(List<OrderMessage> orders) {
        try {
            log.info("开始异步批量处理订单,数量: {}", orders.size());
            
            int successCount = 0;
            for (OrderMessage order : orders) {
                try {
                    // 处理单个订单
                    orderMessageProducer.sendOrderCreated(order);
                    kafkaMessageProducer.sendOrderCreated(order);
                    successCount++;
                } catch (Exception e) {
                    log.error("订单处理失败: {}", order.getOrderNumber(), e);
                }
            }
            
            log.info("批量订单处理完成,成功: {}, 失败: {}", successCount, orders.size() - successCount);
            
            return CompletableFuture.completedFuture(successCount);
            
        } catch (Exception e) {
            log.error("批量订单处理失败", e);
            return CompletableFuture.completedFuture(0);
        }
    }

    /**
     * 异步处理订单支付
     */
    @Async("messageExecutor")
    public CompletableFuture<Boolean> processPaymentAsync(OrderMessage orderMessage) {
        try {
            log.info("开始异步处理订单支付: {}", orderMessage.getOrderNumber());
            
            // 模拟支付处理时间
            Thread.sleep(3000);
            
            // 发送支付成功消息
            orderMessageProducer.sendOrderPaid(orderMessage);
            kafkaMessageProducer.sendOrderPaid(orderMessage);
            
            // 发送支付成功通知
            notificationService.sendPaymentSuccessNotification(orderMessage);
            
            log.info("订单支付处理完成: {}", orderMessage.getOrderNumber());
            
            return CompletableFuture.completedFuture(true);
            
        } catch (Exception e) {
            log.error("订单支付处理失败: {}", orderMessage.getOrderNumber(), e);
            return CompletableFuture.completedFuture(false);
        }
    }
}
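
Because the @Async methods return CompletableFuture, callers can react when processing finishes without blocking. A hedged usage sketch from a hypothetical controller (the class name and mapping are assumptions; Spring MVC completes the HTTP response when the future resolves):

// AsyncOrderController.java (sketch)
package com.example.demo.controller;

import com.example.demo.model.OrderMessage;
import com.example.demo.service.AsyncOrderService;
import org.springframework.beans.factory.annotation.Autowired;
import org.springframework.web.bind.annotation.PostMapping;
import org.springframework.web.bind.annotation.RequestBody;
import org.springframework.web.bind.annotation.RequestMapping;
import org.springframework.web.bind.annotation.RestController;

import java.util.concurrent.CompletableFuture;

@RestController
@RequestMapping("/api/orders")
public class AsyncOrderController {

    @Autowired
    private AsyncOrderService asyncOrderService;

    /**
     * Returns immediately; the order is processed on the taskExecutor thread pool.
     */
    @PostMapping("/async")
    public CompletableFuture<String> createOrderAsync(@RequestBody OrderMessage orderMessage) {
        return asyncOrderService.processOrderAsync(orderMessage)
                .thenApply(result -> "Order " + orderMessage.getOrderNumber() + ": " + result);
    }
}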

3. Scheduled Tasks

Scheduled task service

// ScheduledTaskService.java
package com.example.demo.service;

import com.example.demo.producer.OrderMessageProducer;
import com.example.demo.producer.KafkaMessageProducer;
import lombok.extern.slf4j.Slf4j;
import org.springframework.beans.factory.annotation.Autowired;
import org.springframework.scheduling.annotation.Scheduled;
import org.springframework.stereotype.Service;

import java.time.LocalDateTime;
import java.util.List;

@Slf4j
@Service
public class ScheduledTaskService {

    @Autowired
    private OrderMessageProducer orderMessageProducer;
    
    @Autowired
    private KafkaMessageProducer kafkaMessageProducer;
    
    @Autowired
    private OrderService orderService;

    /**
     * 定时清理过期订单(每分钟执行一次)
     */
    @Scheduled(fixedRate = 60000)
    public void cleanExpiredOrders() {
        try {
            log.info("开始清理过期订单,时间: {}", LocalDateTime.now());
            
            // 获取过期订单
            List<OrderMessage> expiredOrders = orderService.getExpiredOrders();
            
            if (!expiredOrders.isEmpty()) {
                // Publish order-cancelled messages.
                // Note: sendOrderCancelled is not defined in the producer classes shown earlier;
                // a sketch of such a method follows this class.
                for (OrderMessage order : expiredOrders) {
                    orderMessageProducer.sendOrderCancelled(order);
                    kafkaMessageProducer.sendOrderCancelled(order);
                }
                
                log.info("Expired-order cleanup finished, count: {}", expiredOrders.size());
            } else {
                log.info("No expired orders to clean up");
            }
            
        } catch (Exception e) {
            log.error("清理过期订单失败", e);
        }
    }

    /**
     * 定时发送提醒消息(每天上午9点执行)
     */
    @Scheduled(cron = "0 0 9 * * ?")
    public void sendDailyReminders() {
        try {
            log.info("开始发送每日提醒消息,时间: {}", LocalDateTime.now());
            
            // 获取需要提醒的订单
            List<OrderMessage> reminderOrders = orderService.getOrdersNeedingReminder();
            
            if (!reminderOrders.isEmpty()) {
                // 发送提醒消息
                for (OrderMessage order : reminderOrders) {
                    // 发送延迟提醒消息
                    orderMessageProducer.sendDelayedMessage(order, 300000); // 5分钟后发送
                }
                
                log.info("每日提醒消息发送完成,数量: {}", reminderOrders.size());
            } else {
                log.info("没有需要提醒的订单");
            }
            
        } catch (Exception e) {
            log.error("发送每日提醒消息失败", e);
        }
    }

    /**
     * 定时统计消息队列状态(每5分钟执行一次)
     */
    @Scheduled(fixedRate = 300000)
    public void monitorQueueStatus() {
        try {
            log.info("开始监控消息队列状态,时间: {}", LocalDateTime.now());
            
            // 获取队列统计信息
            QueueStats stats = getQueueStats();
            
            log.info("队列状态监控 - 总消息数: {}, 待处理: {}, 处理中: {}, 已完成: {}", 
                    stats.getTotalMessages(), 
                    stats.getPendingMessages(), 
                    stats.getProcessingMessages(), 
                    stats.getCompletedMessages());
            
        } catch (Exception e) {
            log.error("监控消息队列状态失败", e);
        }
    }

    /**
     * 队列统计信息
     */
    private QueueStats getQueueStats() {
        // 这里应该调用RabbitMQ和Kafka管理API获取实际统计信息
        // 为了演示,返回模拟数据
        QueueStats stats = new QueueStats();
        stats.setTotalMessages(1000);
        stats.setPendingMessages(150);
        stats.setProcessingMessages(50);
        stats.setCompletedMessages(800);
        return stats;
    }

    /**
     * 队列统计信息类
     */
    public static class QueueStats {
        private int totalMessages;
        private int pendingMessages;
        private int processingMessages;
        private int completedMessages;

        // Getter和Setter方法
        public int getTotalMessages() { return totalMessages; }
        public void setTotalMessages(int totalMessages) { this.totalMessages = totalMessages; }

        public int getPendingMessages() { return pendingMessages; }
        public void setPendingMessages(int pendingMessages) { this.pendingMessages = pendingMessages; }

        public int getProcessingMessages() { return processingMessages; }
        public void setProcessingMessages(int processingMessages) { this.processingMessages = processingMessages; }

        public int getCompletedMessages() { return completedMessages; }
        public void setCompletedMessages(int completedMessages) { this.completedMessages = completedMessages; }
    }
}
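
The cleanup task above calls sendOrderCancelled, which the producer classes shown earlier do not define. A minimal sketch of what such a method could look like on OrderMessageProducer (the "order.cancelled" routing key and a matching queue binding are assumptions; a similar method publishing to an order-cancelled topic would be needed on KafkaMessageProducer):

    /**
     * Publish an order-cancelled message (sketch; assumes a queue is bound
     * to order.exchange with routing key "order.cancelled").
     */
    public void sendOrderCancelled(OrderMessage orderMessage) {
        try {
            rabbitTemplate.convertAndSend("order.exchange", "order.cancelled", orderMessage);
            log.info("Order-cancelled message sent: {}", orderMessage.getOrderNumber());
        } catch (Exception e) {
            log.error("Failed to send order-cancelled message: {}", orderMessage.getOrderNumber(), e);
            throw new RuntimeException("Failed to send message", e);
        }
    }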

✅ Part 5: MQTT and IoT Applications (60 minutes)

1. MQTT Protocol Basics

MQTT protocol characteristics

// MQTTProtocolBasics.java
package com.example.demo.mq.mqtt;

import java.util.HashMap;
import java.util.Map;

public class MQTTProtocolBasics {
    
    public static class MQTTFeature {
        private String name;
        private String description;
        private String benefit;
        private String qosLevel;
        
        public MQTTFeature(String name, String description, String benefit, String qosLevel) {
            this.name = name;
            this.description = description;
            this.benefit = benefit;
            this.qosLevel = qosLevel;
        }
        
        // Getter方法
        public String getName() { return name; }
        public String getDescription() { return description; }
        public String getBenefit() { return benefit; }
        public String getQosLevel() { return qosLevel; }
    }
    
    public static void main(String[] args) {
        Map<String, MQTTFeature> features = new HashMap<>();
        
        // 轻量级协议
        features.put("lightweight", new MQTTFeature(
            "轻量级协议",
            "协议头部开销小,适合资源受限的设备",
            "降低网络带宽消耗,提高传输效率",
            "QoS 0 - 最多一次"
        ));
        
        // 发布订阅模式
        features.put("pubsub", new MQTTFeature(
            "发布订阅模式",
            "支持一对多的消息分发",
            "简化消息路由,提高系统灵活性",
            "QoS 1 - 至少一次"
        ));
        
        // 服务质量等级
        features.put("qos", new MQTTFeature(
            "服务质量等级",
            "提供不同级别的消息传递保证",
            "根据业务需求选择合适的可靠性级别",
            "QoS 2 - 恰好一次"
        ));
        
        // 持久会话
        features.put("persistent", new MQTTFeature(
            "持久会话",
            "客户端断开重连后能恢复消息",
            "提高系统可靠性,保证消息不丢失",
            "Clean Session = false"
        ));
        
        System.out.println("=== MQTT协议核心特性 ===");
        for (Map.Entry<String, MQTTFeature> entry : features.entrySet()) {
            MQTTFeature feature = entry.getValue();
            System.out.println("\n特性: " + feature.getName());
            System.out.println("描述: " + feature.getDescription());
            System.out.println("好处: " + feature.getBenefit());
            System.out.println("QoS级别: " + feature.getQosLevel());
        }
    }
}

2. MQTT Client Implementation

MQTT client configuration

// MQTTClientConfig.java
package com.example.demo.config;

import org.eclipse.paho.client.mqttv3.MqttClient;
import org.eclipse.paho.client.mqttv3.MqttConnectOptions;
import org.eclipse.paho.client.mqttv3.MqttException;
import org.eclipse.paho.client.mqttv3.persist.MemoryPersistence;
import org.springframework.beans.factory.annotation.Value;
import org.springframework.context.annotation.Bean;
import org.springframework.context.annotation.Configuration;

@Configuration
public class MQTTClientConfig {

    @Value("${mqtt.broker.url:tcp://localhost:1883}")
    private String brokerUrl;
    
    @Value("${mqtt.client.id:spring-mqtt-client}")
    private String clientId;
    
    @Value("${mqtt.username:}")
    private String username;
    
    @Value("${mqtt.password:}")
    private String password;

    /**
     * MQTT客户端
     */
    @Bean
    public MqttClient mqttClient() throws MqttException {
        MemoryPersistence persistence = new MemoryPersistence();
        MqttClient client = new MqttClient(brokerUrl, clientId, persistence);
        return client;
    }

    /**
     * MQTT连接选项
     */
    @Bean
    public MqttConnectOptions mqttConnectOptions() {
        MqttConnectOptions options = new MqttConnectOptions();
        
        // 设置用户名和密码
        if (username != null && !username.isEmpty()) {
            options.setUserName(username);
        }
        if (password != null && !password.isEmpty()) {
            options.setPassword(password.toCharArray());
        }
        
        // 设置连接选项
        options.setCleanSession(true);
        options.setConnectionTimeout(30);
        options.setKeepAliveInterval(60);
        options.setAutomaticReconnect(true);
        
        return options;
    }
}

3. MQTT Message Service

MQTT message service

// MQTTMessageService.java
package com.example.demo.service;

import com.example.demo.model.IoTMessage;
import lombok.extern.slf4j.Slf4j;
import org.eclipse.paho.client.mqttv3.*;
import org.springframework.beans.factory.annotation.Autowired;
import org.springframework.stereotype.Service;

import javax.annotation.PostConstruct;
import javax.annotation.PreDestroy;
import java.util.concurrent.CompletableFuture;

@Slf4j
@Service
public class MQTTMessageService {

    @Autowired
    private MqttClient mqttClient;
    
    @Autowired
    private MqttConnectOptions mqttConnectOptions;

    @PostConstruct
    public void connect() {
        try {
            mqttClient.connect(mqttConnectOptions);
            log.info("MQTT客户端连接成功");
        } catch (MqttException e) {
            log.error("MQTT客户端连接失败", e);
        }
    }

    @PreDestroy
    public void disconnect() {
        try {
            if (mqttClient.isConnected()) {
                mqttClient.disconnect();
                log.info("MQTT客户端断开连接");
            }
        } catch (MqttException e) {
            log.error("MQTT客户端断开连接失败", e);
        }
    }

    /**
     * 发布消息
     */
    public void publish(String topic, IoTMessage message, int qos) {
        try {
            String payload = message.toJson();
            MqttMessage mqttMessage = new MqttMessage(payload.getBytes());
            mqttMessage.setQos(qos);
            mqttMessage.setRetained(false);
            
            mqttClient.publish(topic, mqttMessage);
            log.info("MQTT消息发布成功: topic={}, qos={}", topic, qos);
            
        } catch (MqttException e) {
            log.error("MQTT消息发布失败: topic={}", topic, e);
            throw new RuntimeException("消息发布失败", e);
        }
    }

    /**
     * 异步发布消息
     */
    public CompletableFuture<Void> publishAsync(String topic, IoTMessage message, int qos) {
        return CompletableFuture.runAsync(() -> {
            publish(topic, message, qos);
        });
    }

    /**
     * 订阅主题
     */
    public void subscribe(String topic, int qos, MqttCallback callback) {
        try {
            mqttClient.setCallback(callback);
            mqttClient.subscribe(topic, qos);
            log.info("MQTT主题订阅成功: topic={}, qos={}", topic, qos);
            
        } catch (MqttException e) {
            log.error("MQTT主题订阅失败: topic={}", topic, e);
            throw new RuntimeException("主题订阅失败", e);
        }
    }

    /**
     * 取消订阅
     */
    public void unsubscribe(String topic) {
        try {
            mqttClient.unsubscribe(topic);
            log.info("MQTT主题取消订阅成功: topic={}", topic);
            
        } catch (MqttException e) {
            log.error("MQTT主题取消订阅失败: topic={}", topic, e);
            throw new RuntimeException("主题取消订阅失败", e);
        }
    }
}

IoT message entity

// IoTMessage.java
package com.example.demo.model;

import com.fasterxml.jackson.annotation.JsonFormat;
import lombok.Data;
import java.time.LocalDateTime;
import java.util.Map;

@Data
public class IoTMessage {
    
    private String deviceId;
    private String messageType;
    private String payload;
    private Map<String, Object> data;
    private Double latitude;
    private Double longitude;
    private String location;
    
    @JsonFormat(pattern = "yyyy-MM-dd HH:mm:ss")
    private LocalDateTime timestamp;
    
    private String qos;
    private String status;
    
    public IoTMessage() {
        this.timestamp = LocalDateTime.now();
        this.status = "ACTIVE";
    }
    
    public IoTMessage(String deviceId, String messageType, String payload) {
        this();
        this.deviceId = deviceId;
        this.messageType = messageType;
        this.payload = payload;
    }
    
    // Shared mapper with JavaTimeModule registered so the LocalDateTime timestamp
    // can be (de)serialized; a plain ObjectMapper rejects java.time types by default
    private static final com.fasterxml.jackson.databind.ObjectMapper MAPPER =
            new com.fasterxml.jackson.databind.ObjectMapper()
                    .registerModule(new com.fasterxml.jackson.datatype.jsr310.JavaTimeModule());
    
    public String toJson() {
        try {
            return MAPPER.writeValueAsString(this);
        } catch (Exception e) {
            throw new RuntimeException("JSON serialization failed", e);
        }
    }
    
    public static IoTMessage fromJson(String json) {
        try {
            return MAPPER.readValue(json, IoTMessage.class);
        } catch (Exception e) {
            throw new RuntimeException("JSON deserialization failed", e);
        }
    }
}

4. MQTT Message Consumer

MQTT message consumer

// MQTTMessageConsumer.java
package com.example.demo.consumer;

import com.example.demo.model.IoTMessage;
import com.example.demo.service.IoTDataService;
import lombok.extern.slf4j.Slf4j;
import org.eclipse.paho.client.mqttv3.*;
import org.springframework.beans.factory.annotation.Autowired;
import org.springframework.stereotype.Component;

@Slf4j
@Component
public class MQTTMessageConsumer implements MqttCallback {

    @Autowired
    private IoTDataService ioTDataService;

    @Override
    public void connectionLost(Throwable cause) {
        log.error("MQTT连接丢失", cause);
    }

    @Override
    public void messageArrived(String topic, MqttMessage message) throws Exception {
        try {
            String payload = new String(message.getPayload());
            log.info("收到MQTT消息: topic={}, qos={}, payload={}", 
                    topic, message.getQos(), payload);
            
            // 解析消息
            IoTMessage ioTMessage = IoTMessage.fromJson(payload);
            
            // 处理不同类型的消息
            switch (ioTMessage.getMessageType()) {
                case "SENSOR_DATA":
                    ioTDataService.processSensorData(ioTMessage);
                    break;
                case "DEVICE_STATUS":
                    ioTDataService.processDeviceStatus(ioTMessage);
                    break;
                case "ALERT":
                    ioTDataService.processAlert(ioTMessage);
                    break;
                default:
                    log.warn("未知消息类型: {}", ioTMessage.getMessageType());
            }
            
        } catch (Exception e) {
            log.error("MQTT消息处理失败: topic={}", topic, e);
        }
    }

    @Override
    public void deliveryComplete(IMqttDeliveryToken token) {
        log.debug("MQTT消息投递完成: messageId={}", token.getMessageId());
    }
}
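
The callback above only receives messages once it has been registered with the client and a topic filter has been subscribed. A hedged wiring sketch (the "iot/#" topic filter and QoS 1 are assumptions) using the MQTTMessageService defined earlier:

// MQTTSubscriptionInitializer.java (sketch)
package com.example.demo.config;

import com.example.demo.consumer.MQTTMessageConsumer;
import com.example.demo.service.MQTTMessageService;
import org.springframework.beans.factory.annotation.Autowired;
import org.springframework.boot.ApplicationRunner;
import org.springframework.context.annotation.Bean;
import org.springframework.context.annotation.Configuration;

@Configuration
public class MQTTSubscriptionInitializer {

    @Autowired
    private MQTTMessageService mqttMessageService;

    @Autowired
    private MQTTMessageConsumer mqttMessageConsumer;

    /**
     * Register the callback and subscribe after the application has started,
     * by which point the MQTT client has connected (MQTTMessageService connects in @PostConstruct).
     */
    @Bean
    public ApplicationRunner mqttSubscriptionRunner() {
        return args -> mqttMessageService.subscribe("iot/#", 1, mqttMessageConsumer);
    }
}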

✅ Part 6: Message Queue Performance Tuning and Monitoring (60 minutes)

1. Performance Optimization Strategies

Message queue performance tuning

// MessageQueueOptimization.java
package com.example.demo.optimization;

import org.springframework.amqp.rabbit.config.SimpleRabbitListenerContainerFactory;
import org.springframework.amqp.rabbit.connection.ConnectionFactory;
import org.springframework.amqp.rabbit.listener.RabbitListenerContainerFactory;
import org.springframework.amqp.rabbit.listener.SimpleMessageListenerContainer;
import org.springframework.context.annotation.Bean;
import org.springframework.context.annotation.Configuration;
import org.springframework.kafka.config.ConcurrentKafkaListenerContainerFactory;
import org.springframework.kafka.core.ConsumerFactory;
import org.springframework.kafka.listener.ContainerProperties;

@Configuration
public class MessageQueueOptimization {

    /**
     * RabbitMQ performance tuning.
     * Defining a bean named rabbitListenerContainerFactory replaces Spring Boot's default
     * listener container factory; the MANUAL acknowledge mode set here is what the manual
     * basicAck/basicNack calls in OrderMessageConsumer rely on.
     */
    @Bean
    public RabbitListenerContainerFactory<SimpleMessageListenerContainer> rabbitListenerContainerFactory(
            ConnectionFactory connectionFactory) {
        SimpleRabbitListenerContainerFactory factory = new SimpleRabbitListenerContainerFactory();
        factory.setConnectionFactory(connectionFactory);
        
        // Concurrent consumers
        factory.setConcurrentConsumers(5);
        factory.setMaxConcurrentConsumers(10);
        
        // Prefetch count (1 favors fair dispatch; raise it for higher throughput)
        factory.setPrefetchCount(1);
        
        // Manual acknowledge mode (matches the manual ack/nack in the consumers)
        factory.setAcknowledgeMode(org.springframework.amqp.core.AcknowledgeMode.MANUAL);
        
        // Retry configuration (a bare RetryTemplate uses default policies; customize as needed)
        factory.setRetryTemplate(new org.springframework.retry.support.RetryTemplate());
        
        return factory;
    }

    /**
     * Kafka performance tuning.
     * Note: this factory has the same bean name as the one defined in KafkaConfig;
     * in a real project keep only one of the two definitions (this one additionally
     * enables batch listening, which the order-batch listener requires).
     */
    @Bean
    public ConcurrentKafkaListenerContainerFactory<String, Object> kafkaListenerContainerFactory(
            ConsumerFactory<String, Object> consumerFactory) {
        ConcurrentKafkaListenerContainerFactory<String, Object> factory = 
            new ConcurrentKafkaListenerContainerFactory<>();
        factory.setConsumerFactory(consumerFactory);
        
        // 并发数
        factory.setConcurrency(3);
        
        // 批量消费
        factory.setBatchListener(true);
        
        // 手动提交
        factory.getContainerProperties().setAckMode(ContainerProperties.AckMode.MANUAL_IMMEDIATE);
        
        // 轮询间隔
        factory.getContainerProperties().setPollTimeout(1000);
        
        return factory;
    }
}

2. Message Queue Monitoring

Message queue monitoring service

// MessageQueueMonitorService.java
package com.example.demo.service;

import lombok.extern.slf4j.Slf4j;
import org.springframework.amqp.rabbit.core.RabbitTemplate;
import org.springframework.beans.factory.annotation.Autowired;
import org.springframework.kafka.core.KafkaTemplate;
import org.springframework.stereotype.Service;

import java.util.HashMap;
import java.util.Map;
import java.util.concurrent.atomic.AtomicLong;

@Slf4j
@Service
public class MessageQueueMonitorService {

    @Autowired
    private RabbitTemplate rabbitTemplate;
    
    @Autowired
    private KafkaTemplate<String, Object> kafkaTemplate;

    // 消息统计
    private final AtomicLong rabbitSentCount = new AtomicLong(0);
    private final AtomicLong rabbitReceivedCount = new AtomicLong(0);
    private final AtomicLong kafkaSentCount = new AtomicLong(0);
    private final AtomicLong kafkaReceivedCount = new AtomicLong(0);
    private final AtomicLong errorCount = new AtomicLong(0);

    /**
     * 获取RabbitMQ统计信息
     */
    public Map<String, Object> getRabbitMQStats() {
        Map<String, Object> stats = new HashMap<>();
        
        try {
            // 这里应该调用RabbitMQ管理API获取实际统计信息
            // 为了演示,返回模拟数据
            stats.put("sentMessages", rabbitSentCount.get());
            stats.put("receivedMessages", rabbitReceivedCount.get());
            stats.put("errorCount", errorCount.get());
            stats.put("status", "HEALTHY");
            
        } catch (Exception e) {
            log.error("获取RabbitMQ统计信息失败", e);
            stats.put("status", "ERROR");
            stats.put("error", e.getMessage());
        }
        
        return stats;
    }

    /**
     * 获取Kafka统计信息
     */
    public Map<String, Object> getKafkaStats() {
        Map<String, Object> stats = new HashMap<>();
        
        try {
            // 这里应该调用Kafka管理API获取实际统计信息
            // 为了演示,返回模拟数据
            stats.put("sentMessages", kafkaSentCount.get());
            stats.put("receivedMessages", kafkaReceivedCount.get());
            stats.put("errorCount", errorCount.get());
            stats.put("status", "HEALTHY");
            
        } catch (Exception e) {
            log.error("获取Kafka统计信息失败", e);
            stats.put("status", "ERROR");
            stats.put("error", e.getMessage());
        }
        
        return stats;
    }

    /**
     * 获取整体统计信息
     */
    public Map<String, Object> getOverallStats() {
        Map<String, Object> stats = new HashMap<>();
        
        stats.put("rabbitMQ", getRabbitMQStats());
        stats.put("kafka", getKafkaStats());
        
        // 计算总体统计
        long totalSent = rabbitSentCount.get() + kafkaSentCount.get();
        long totalReceived = rabbitReceivedCount.get() + kafkaReceivedCount.get();
        
        stats.put("totalSent", totalSent);
        stats.put("totalReceived", totalReceived);
        stats.put("totalErrors", errorCount.get());
        
        // 计算成功率
        double successRate = totalSent > 0 ? (double) totalReceived / totalSent * 100 : 0;
        stats.put("successRate", String.format("%.2f%%", successRate));
        
        return stats;
    }

    /**
     * 记录消息发送
     */
    public void recordMessageSent(String queueType) {
        if ("rabbitmq".equals(queueType)) {
            rabbitSentCount.incrementAndGet();
        } else if ("kafka".equals(queueType)) {
            kafkaSentCount.incrementAndGet();
        }
    }

    /**
     * 记录消息接收
     */
    public void recordMessageReceived(String queueType) {
        if ("rabbitmq".equals(queueType)) {
            rabbitReceivedCount.incrementAndGet();
        } else if ("kafka".equals(queueType)) {
            kafkaReceivedCount.incrementAndGet();
        }
    }

    /**
     * 记录错误
     */
    public void recordError() {
        errorCount.incrementAndGet();
    }
}

3. Health Check Endpoints

Message queue health check

// MessageQueueHealthController.java
package com.example.demo.controller;

import com.example.demo.service.MessageQueueMonitorService;
import org.springframework.beans.factory.annotation.Autowired;
import org.springframework.http.ResponseEntity;
import org.springframework.web.bind.annotation.GetMapping;
import org.springframework.web.bind.annotation.RequestMapping;
import org.springframework.web.bind.annotation.RestController;

import java.util.Map;

@RestController
@RequestMapping("/api/monitor")
public class MessageQueueHealthController {

    @Autowired
    private MessageQueueMonitorService monitorService;

    /**
     * 获取消息队列健康状态
     */
    @GetMapping("/health")
    public ResponseEntity<Map<String, Object>> getHealthStatus() {
        Map<String, Object> stats = monitorService.getOverallStats();
        return ResponseEntity.ok(stats);
    }

    /**
     * 获取RabbitMQ状态
     */
    @GetMapping("/rabbitmq")
    public ResponseEntity<Map<String, Object>> getRabbitMQStatus() {
        Map<String, Object> stats = monitorService.getRabbitMQStats();
        return ResponseEntity.ok(stats);
    }

    /**
     * 获取Kafka状态
     */
    @GetMapping("/kafka")
    public ResponseEntity<Map<String, Object>> getKafkaStatus() {
        Map<String, Object> stats = monitorService.getKafkaStats();
        return ResponseEntity.ok(stats);
    }
}

4. Message Queue Test Controller

Message queue test endpoints

// MessageQueueTestController.java
package com.example.demo.controller;

import com.example.demo.model.OrderMessage;
import com.example.demo.model.IoTMessage;
import com.example.demo.producer.OrderMessageProducer;
import com.example.demo.producer.KafkaMessageProducer;
import com.example.demo.service.MQTTMessageService;
import com.example.demo.service.MessageQueueMonitorService;
import org.springframework.beans.factory.annotation.Autowired;
import org.springframework.http.ResponseEntity;
import org.springframework.web.bind.annotation.*;

import java.math.BigDecimal;
import java.util.HashMap;
import java.util.Map;

@RestController
@RequestMapping("/api/test")
public class MessageQueueTestController {

    @Autowired
    private OrderMessageProducer orderMessageProducer;
    
    @Autowired
    private KafkaMessageProducer kafkaMessageProducer;
    
    @Autowired
    private MQTTMessageService mqttMessageService;
    
    @Autowired
    private MessageQueueMonitorService monitorService;

    /**
     * 测试RabbitMQ消息发送
     */
    @PostMapping("/rabbitmq/send")
    public ResponseEntity<Map<String, Object>> testRabbitMQSend(@RequestBody OrderMessage orderMessage) {
        try {
            orderMessageProducer.sendOrderCreated(orderMessage);
            monitorService.recordMessageSent("rabbitmq");
            
            return ResponseEntity.ok(Map.of(
                "success", true,
                "message", "RabbitMQ消息发送成功",
                "orderNumber", orderMessage.getOrderNumber()
            ));
            
        } catch (Exception e) {
            monitorService.recordError();
            return ResponseEntity.badRequest().body(Map.of(
                "success", false,
                "message", "RabbitMQ消息发送失败",
                "error", e.getMessage()
            ));
        }
    }

    /**
     * 测试Kafka消息发送
     */
    @PostMapping("/kafka/send")
    public ResponseEntity<Map<String, Object>> testKafkaSend(@RequestBody OrderMessage orderMessage) {
        try {
            kafkaMessageProducer.sendOrderCreated(orderMessage);
            monitorService.recordMessageSent("kafka");
            
            return ResponseEntity.ok(Map.of(
                "success", true,
                "message", "Kafka消息发送成功",
                "orderNumber", orderMessage.getOrderNumber()
            ));
            
        } catch (Exception e) {
            monitorService.recordError();
            return ResponseEntity.badRequest().body(Map.of(
                "success", false,
                "message", "Kafka消息发送失败",
                "error", e.getMessage()
            ));
        }
    }

    /**
     * 测试MQTT消息发送
     */
    @PostMapping("/mqtt/send")
    public ResponseEntity<Map<String, Object>> testMQTTSend(@RequestBody IoTMessage ioTMessage) {
        try {
            mqttMessageService.publish("iot/sensor/data", ioTMessage, 1);
            
            return ResponseEntity.ok(Map.of(
                "success", true,
                "message", "MQTT消息发送成功",
                "deviceId", ioTMessage.getDeviceId()
            ));
            
        } catch (Exception e) {
            return ResponseEntity.badRequest().body(Map.of(
                "success", false,
                "message", "MQTT消息发送失败",
                "error", e.getMessage()
            ));
        }
    }

    /**
     * 批量测试消息发送
     */
    @PostMapping("/batch/send")
    public ResponseEntity<Map<String, Object>> testBatchSend(@RequestParam int count) {
        try {
            int successCount = 0;
            int errorCount = 0;
            
            for (int i = 0; i < count; i++) {
                try {
                    OrderMessage orderMessage = new OrderMessage(
                        "ORDER-" + System.currentTimeMillis() + "-" + i,
                        1001L,
                        2001L,
                        1,
                        new BigDecimal("99.99"),
                        "CREATED"
                    );
                    
                    // 发送到RabbitMQ
                    orderMessageProducer.sendOrderCreated(orderMessage);
                    monitorService.recordMessageSent("rabbitmq");
                    
                    // 发送到Kafka
                    kafkaMessageProducer.sendOrderCreated(orderMessage);
                    monitorService.recordMessageSent("kafka");
                    
                    successCount++;
                    
                } catch (Exception e) {
                    errorCount++;
                    monitorService.recordError();
                }
            }
            
            return ResponseEntity.ok(Map.of(
                "success", true,
                "message", "批量消息发送完成",
                "totalCount", count,
                "successCount", successCount,
                "errorCount", errorCount
            ));
            
        } catch (Exception e) {
            return ResponseEntity.badRequest().body(Map.of(
                "success", false,
                "message", "批量消息发送失败",
                "error", e.getMessage()
            ));
        }
    }
}

🎯 Today's Summary

1. Core Skills Covered

  • ✅ Message queue fundamentals and Spring support
  • ✅ RabbitMQ and AMQP protocol implementation
  • ✅ Kafka integration and usage
  • ✅ Spring asynchronous processing and task scheduling
  • ✅ MQTT protocol and IoT applications
  • ✅ Message queue performance tuning and monitoring

2. Message Queue Technology Comparison

Technology | Protocol | Characteristics | Typical use cases
RabbitMQ | AMQP | Feature-rich, highly reliable | Enterprise applications, complex routing
Kafka | Custom binary protocol | High throughput, distributed | Big data processing, log collection
MQTT | MQTT | Lightweight, low power | IoT, mobile applications

3. Spring Messaging Features

  • Unified abstraction: Spring provides a consistent messaging abstraction layer
  • Auto-configuration: Spring Boot auto-configures the messaging components
  • Async support: the @Async annotation enables asynchronous task processing
  • Monitoring integration: Actuator provides health checks and monitoring

4. Performance Optimization Strategies

  • Concurrency: size consumer concurrency sensibly
  • Batch processing: use batch consumption to raise throughput
  • Connection pooling: cache connections and channels to cut connection overhead (see the CachingConnectionFactory sketch in Part 2)
  • Monitoring and alerting: watch queue status in real time

Study Suggestions

  1. Environment setup: install RabbitMQ, Kafka, and an MQTT broker, and configure the development environment
  2. Basic practice: implement simple message sending and receiving
  3. Protocol understanding: dig into how the AMQP and MQTT protocols work
  4. Performance testing: benchmark the queues with tools such as JMeter
  5. Monitoring practice: set up monitoring and alerting for the message queues
  6. Troubleshooting: learn to diagnose common issues such as message loss and duplicate consumption