Enable the database binlog
For MySQL, edit the /etc/mysql/my.cnf file.
For MariaDB, edit the /etc/mysql/mariadb.conf.d/50-server.cnf file.
[mysqld]
log-bin=mysql-bin # enable binlog
binlog-format=ROW # ROW mode logs full row images, which change-data-capture tools need
server_id=1 # must be unique and must not collide with the slaveId configured in canal
Restart the database after the change, then confirm that binlog is enabled:
SHOW VARIABLES LIKE 'log_bin';
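If binlog is on, the query should report ON:
+---------------+-------+
| Variable_name | Value |
+---------------+-------+
| log_bin       | ON    |
+---------------+-------+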
Set up middleware to receive the data
Either canal or Debezium works here; both parse the binlog incrementally and provide change-data subscription and consumption.
Canal
GitHub - alibaba/canal: Alibaba's MySQL binlog incremental subscription & consumption component
Canal currently supports source-side MySQL versions 5.1.x, 5.5.x, 5.6.x, 5.7.x and 8.0.x.
Debezium
Official demo:
debezium-examples/mysql-replication at main · debezium/debezium-examples (github.com)
This walkthrough pairs Debezium with MariaDB 10.6.16.
version: '2'
services:
  zookeeper:
    image: quay.io/debezium/zookeeper:2.1
    ports:
      - 2181:2181
      - 2888:2888
      - 3888:3888
  kafka:
    image: quay.io/debezium/kafka:2.1
    ports:
      - 9092:9092
    links:
      - zookeeper
    environment:
      - ZOOKEEPER_CONNECT=zookeeper:2181
      - KAFKA_BROKER_ID=0
      - KAFKA_ADVERTISED_LISTENERS=PLAINTEXT://172.31.16.195:9092
      - KAFKA_LISTENERS=PLAINTEXT://0.0.0.0:9092
#  redis:
#    image: redis:latest
#    ports:
#      - 6379:6379
  connect:
    image: quay.io/debezium/connect:2.1
    ports:
      - 8083:8083
    links:
      - kafka
    environment:
      - BOOTSTRAP_SERVERS=kafka:9092
      - GROUP_ID=1
      - CONFIG_STORAGE_TOPIC=my_connect_configs
      - OFFSET_STORAGE_TOPIC=my_connect_offsets
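Assuming the file above is saved as docker-compose.yml, bring the stack up with:
docker compose up -d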
Register the MariaDB database
Send a POST request to localhost:8083/connectors/ to register the connector; a GET request to the same endpoint lists the connectors already registered (example curl calls follow the JSON below).
{
  "name": "bonddb1-connector",
  "config": {
    "connector.class": "io.debezium.connector.mysql.MySqlConnector",
    "tasks.max": "1",
    "database.hostname": "192.168.31.211",
    "database.port": "3308",
    "database.user": "root",
    "database.password": "root",
    "database.server.id": "1234",
    "topic.prefix": "dbserver1",
    "database.include.list": "bonddb",
    "schema.history.internal.kafka.bootstrap.servers": "kafka:9092",
    "schema.history.internal.kafka.topic": "schema-changes.bonddb",
    "connector.adapter": "mariadb",
    "database.protocol": "jdbc:mariadb",
    "database.jdbc.driver": "org.mariadb.jdbc.Driver"
  }
}
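A sketch of the two calls with curl, assuming the JSON above is saved as register-mariadb.json (the file name is arbitrary):
# register the connector
curl -i -X POST -H "Accept: application/json" -H "Content-Type: application/json" \
  http://localhost:8083/connectors/ -d @register-mariadb.json
# list registered connectors
curl http://localhost:8083/connectors/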
Consuming the messages
Kafka receives the messages produced by Debezium. Kafdrop provides a web UI for browsing the topics and messages:
docker run -d --rm -p 19000:9000 -e KAFKA_BROKERCONNECT=192.168.31.211:9092 -e JVM_OPTS="-Xms32M -Xmx1024M" -e SERVER_SERVLET_CONTEXTPATH="/" obsidiandynamics/kafdrop
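Kafdrop is then reachable on port 19000 of the Docker host. The topic to watch here is dbserver1.bonddb.order_log, following Debezium's topic.prefix.database.table naming convention.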
Next, write a listener in Spring Boot to consume the messages.
Maven coordinates
<dependency>
    <groupId>org.springframework.kafka</groupId>
    <artifactId>spring-kafka</artifactId>
    <!-- <version>2.2.6.RELEASE</version> -->
</dependency>
<!-- Spring Data Redis -->
<dependency>
    <groupId>org.springframework.boot</groupId>
    <artifactId>spring-boot-starter-data-redis</artifactId>
</dependency>
<dependency>
    <groupId>com.alibaba</groupId>
    <artifactId>fastjson</artifactId>
    <version>1.2.51</version>
</dependency>
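Note that the listener below parses messages with Jackson's ObjectMapper, which is already on the classpath of a typical Spring Boot application, so the fastjson dependency is only needed if you use it elsewhere.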
Configuration file (application.yml)
spring:
  kafka:
    # consumer
    consumer:
      group-id: 2
      auto-offset-reset: earliest
      enable-auto-commit: false
      bootstrap-servers: localhost:9092
      max-poll-records: 100
    listener:
      type: batch
    # producer
    producer:
      bootstrap-servers: localhost:9092
      key-serializer: org.apache.kafka.common.serialization.StringSerializer
      value-serializer: org.apache.kafka.common.serialization.StringSerializer
  redis:
    host: 127.0.0.1
    port: 6379
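With listener.type set to batch, the @KafkaListener below receives up to max-poll-records (100) messages per poll as a List<String>; since enable-auto-commit is false, the spring-kafka listener container commits offsets after each batch is processed.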
Entity class for the received events
import com.fasterxml.jackson.annotation.JsonIgnoreProperties;
import com.fasterxml.jackson.annotation.JsonProperty;
import lombok.Data;

@JsonIgnoreProperties(ignoreUnknown = true)
@Data
public class DebeziumEvent {

    private Payload payload;

    @Data
    @JsonIgnoreProperties(ignoreUnknown = true)
    public static class Payload {
        private OrderLog before;
        private OrderLog after;
        private Source source;
        private String op;
        @JsonProperty("ts_ms") // Debezium emits snake_case field names
        private Long tsMs;
        private Object transaction; // may be null or a nested object

        // Adjust these fields to match your own table
        @Data
        @JsonIgnoreProperties(ignoreUnknown = true)
        public static class OrderLog {
            private Long id;
            private String order_number;
            private String date;
            private String default_s;
            private String username;
            private String url;
            private String bz;
            private String other1;
        }

        @Data
        @JsonIgnoreProperties(ignoreUnknown = true)
        public static class Source {
            private String version;
            private String connector;
            private String name;
            @JsonProperty("ts_ms")
            private Long tsMs;
            private String snapshot;
            private String db;
            private Object sequence; // may be null or of varying type
            private String table;
            @JsonProperty("server_id")
            private Integer serverId;
            private Object gtid; // may be null or of varying type
            private String file;
            private Integer pos;
            private Integer row;
            private Integer thread;
            private Object query; // may be null or of varying type
        }
    }
}
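For orientation, this is the shape of the envelope the class maps, shown as an abbreviated update event (field values are illustrative; op is c for create, u for update, d for delete, r for snapshot read):
{
  "payload": {
    "before": { "id": 1, "order_number": "A001", "username": "tom" },
    "after":  { "id": 1, "order_number": "A001", "username": "jerry" },
    "source": { "connector": "mysql", "db": "bonddb", "table": "order_log", "ts_ms": 1700000000000 },
    "op": "u",
    "ts_ms": 1700000000123,
    "transaction": null
  }
}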
Listener
import com.fasterxml.jackson.core.JsonProcessingException;
import com.fasterxml.jackson.databind.ObjectMapper;
import org.springframework.beans.factory.annotation.Autowired;
import org.springframework.data.redis.core.RedisTemplate;
import org.springframework.kafka.annotation.KafkaListener;
import org.springframework.stereotype.Component;

import java.lang.reflect.Field;
import java.util.HashMap;
import java.util.List;
import java.util.Map;

@Component
public class MyKafkaListener {

    // One shared ObjectMapper: it is thread-safe and costly to create per message
    private static final ObjectMapper MAPPER = new ObjectMapper();

    @Autowired
    private RedisTemplate<String, String> redisTemplate;

    @KafkaListener(topics = {"dbserver1.bonddb.order_log"})
    public void listen(List<String> data) {
        data.forEach(it -> {
            // Debezium follows every delete with a tombstone record whose value is null
            if (it == null) {
                return;
            }
            try {
                DebeziumEvent event = MAPPER.readValue(it, DebeziumEvent.class);
                if ("d".equals(event.getPayload().getOp())) {
                    // Delete: the old row is in payload.before; remove the key from Redis if required
                } else {
                    // Create/update/snapshot read: the new row state is in payload.after
                    DebeziumEvent.Payload.OrderLog after = event.getPayload().getAfter();
                    String orderNumber = after.getOrder_number();
                    Long id = after.getId();
                    Map<String, String> fields = convertObjectToMap(after);
                    fields.remove("order_number");
                    fields.remove("id");
                    // Store the remaining columns as a Redis hash keyed by orderNumber:id
                    String fullKey = orderNumber + ":" + id;
                    redisTemplate.opsForHash().putAll(fullKey, fields);
                    // Read back with: redisTemplate.opsForHash().entries(fullKey)
                }
            } catch (JsonProcessingException | IllegalAccessException e) {
                throw new RuntimeException(e);
            }
        });
    }

    // Reflectively flatten an entity into a field-name -> string-value map
    public Map<String, String> convertObjectToMap(Object obj) throws IllegalAccessException {
        Map<String, String> map = new HashMap<>();
        for (Field field : obj.getClass().getDeclaredFields()) {
            field.setAccessible(true); // allow reading private fields
            Object value = field.get(obj);
            if (value != null) {
                map.put(field.getName(), value.toString());
            }
            // null columns are skipped: Redis hashes cannot store null values
        }
        return map;
    }
}
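To spot-check the sync, the hash can be inspected from redis-cli, assuming a row with order_number A001 and id 1 (hypothetical values):
redis-cli HGETALL "A001:1"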