1. Reference links
2. Network topology
3. Environment setup
The Docker containers run inside a VMware virtual machine; NAT is used to map the VM to the host IP.
- Create the MySQL container
  1. Pull the image
     docker pull mysql:latest
  2. Create the directories and files
     mkdir -p /mysql/conf /mysql/data /mysql/log /mysql/mysql-files
     chmod 777 /mysql/* -R
     chmod 0644 /mysql/conf -R    # note: the config file's permissions have to be handled separately, otherwise MySQL reports an error
     Add a configuration file, save it as /mysql/conf/my.conf, and give it 0644 permissions:
     [mysqld]
     datadir=/var/lib/mysql
     socket=/var/lib/mysql/mysql.sock
     log-error=/var/log/mysqld.log
     pid-file=/var/run/mysqld/mysqld.pid
     max_allowed_packet = 1G
     innodb_log_file_size = 30M
     innodb_log_buffer_size = 256M
     innodb_file_per_table = 1
     innodb_buffer_pool_size=1G
     #innodb_file_format=barracuda
     max_connections=2000
     lower-case-table-names=1
     innodb_strict_mode = 0
     character_set_server=utf8
     secure_file_priv =
     transaction-isolation = READ-COMMITTED
     default_authentication_plugin=mysql_native_password
     default-time-zone = '+08:00'
     #skip-grant-tables
     [mysql]
     prompt="\\u@\\h : \\d \\r:\\m:\\s>"
     default-character-set=utf8
  3. Start the container
     docker run --name mysql -p 3306:3306 -v /mysql/conf/my.conf:/etc/mysql/my.cnf -v /mysql/data:/var/lib/mysql -v /mysql/log:/var/log/mysql -v /mysql/mysql-files:/var/lib/mysql-files -e MYSQL_ROOT_PASSWORD=123456 --restart=always -d mysql:8.0.26    # note: the parameters, port, etc. were adjusted, and --restart=always makes the database start automatically
     For reference, the command before those adjustments:
     `docker run -p 3306:3306 --name mysql -v /usr/mydata/mysql/log:/var/log/mysql -v /usr/mydata/mysql/data:/var/lib/mysql -v /usr/mydata/mysql/conf:/etc/mysql/conf.d -e MYSQL_ROOT_PASSWORD=123456 -d mysql:latest`
  4. Log into the container and create a user that can connect remotely
     docker exec -it mysql bash
     mysql -uroot
     Because mysql_native_password is configured above, the user has to be created with that plugin:
     create user 'root'@'%' identified WITH mysql_native_password by 'Tyourpasswords';
     flush privileges;
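To confirm the remote account works before moving on, you can try connecting from outside the container; 192.168.206.128 below is the VM IP that the canal client later connects to, and the check assumes a mysql client is installed on the host:

```shell
# enter the password used in the create user statement above
mysql -h 192.168.206.128 -P 3306 -uroot -p
```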
- Create the canal server
  1. Create the canal-server container
     docker run -d --name canal-server -p 11111:11111 canal/canal-server:latest
  2. Configure canal-server
     # enter the canal-server container
     docker exec -it canal-server /bin/bash
     # edit the canal-server instance configuration
     vi canal-server/conf/example/instance.properties
     # the slaveId, the account/password, and the target MySQL address/port to monitor all need to be adjusted; a sketch of the relevant keys follows below
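The file itself is not shown above; as a rough sketch, the keys that usually need touching look like this (key names come from a stock example instance, the values here are assumptions matching this setup):

```properties
# must be unique: do not reuse MySQL's own server-id or any other canal slaveId
canal.instance.mysql.slaveId=1234
# the MySQL endpoint to monitor (the VM IP and the port published above)
canal.instance.master.address=192.168.206.128:3306
# the replication account created in MySQL (see the Configuration step below)
canal.instance.dbUsername=canal
canal.instance.dbPassword=canal
# optional table filter; the client below subscribes to .*\..* anyway
canal.instance.filter.regex=.*\\..*
```

The destination the client connects to ("example" in the code below) is simply the name of this instance directory under conf/.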
- Configuration
- In MySQL, create the corresponding canal account, enable binlog recording, and grant the account the required privileges (sketched below)
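A minimal sketch of that step; the account name and password match what the client code below passes in, and the grants are the usual SELECT plus replication privileges canal needs:

```sql
-- run inside the mysql container (docker exec -it mysql bash, then mysql -uroot -p)
CREATE USER 'canal'@'%' IDENTIFIED WITH mysql_native_password BY 'canal';
GRANT SELECT, REPLICATION SLAVE, REPLICATION CLIENT ON *.* TO 'canal'@'%';
FLUSH PRIVILEGES;

-- binlog must be enabled and in ROW format; MySQL 8 defaults to both,
-- which can be verified with:
SHOW VARIABLES LIKE 'log_bin';
SHOW VARIABLES LIKE 'binlog_format';
```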
- On the canal server, configure the instance the client will connect to: set a slaveId that is not already in use, and keep the destination consistent with the one the canal client uses
root@yjx-virtual-machine:~# docker ps -a
CONTAINER ID   IMAGE                       COMMAND                  CREATED      STATUS       PORTS                                                                            NAMES
5cdc2cb453ee   canal/canal-server:latest   "/alidata/bin/main.s…"   7 days ago   Up 3 hours   9100/tcp, 11110/tcp, 11112/tcp, 0.0.0.0:11111->11111/tcp, :::11111->11111/tcp   canal-server
a01cb9670bd7   mysql                       "docker-entrypoint.s…"   8 days ago   Up 9 hours   0.0.0.0:3306->3306/tcp, :::3306->3306/tcp, 33060/tcp                             mysql-test
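Before starting the client it is worth checking that the instance actually connected to MySQL; the example instance writes its log under the canal-server directory (same relative path convention as the vi command above):

```shell
docker exec -it canal-server tail -n 50 canal-server/logs/example/example.log
# look for a successful start and the absence of connection/authentication errors
```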
- Start the client
package com.yjx.flinktest.canal;
import com.alibaba.otter.canal.client.CanalConnector;
import com.alibaba.otter.canal.client.CanalConnectors;
import com.alibaba.otter.canal.protocol.CanalEntry.*;
import com.alibaba.otter.canal.protocol.Message;
import org.springframework.beans.factory.InitializingBean;
import org.springframework.stereotype.Component;
import java.net.InetSocketAddress;
import java.util.List;
@Component
public class CanalClient implements InitializingBean {
private final static int BATCH_SIZE = 1000;
@Override
public void afterPropertiesSet() throws Exception {
String ip = "192.168.206.128";
String destination = "example";
//create the connector (server address, destination, username, password)
CanalConnector connector = CanalConnectors.newSingleConnector(new InetSocketAddress(ip, 11111), destination, "canal", "canal");
int batchSize = 1000;
int emptyCount = 0;
try {
connector.connect();
connector.subscribe(".*\\..*");
connector.rollback();
int totalEmptyCount = 120;
while (emptyCount < totalEmptyCount) {
Message message = connector.getWithoutAck(batchSize); // fetch up to batchSize entries without acknowledging them
long batchId = message.getId();
int size = message.getEntries().size();
if (batchId == -1 || size == 0) {
emptyCount++;
System.out.println("empty count : " + emptyCount);
try {
Thread.sleep(1000);
} catch (InterruptedException e) {
}
} else {
emptyCount = 0;
// System.out.printf("message[batchId=%s,size=%s] \n", batchId, size);
printEntry(message.getEntries());
}
connector.ack(batchId); // acknowledge the batch
// connector.rollback(batchId); // on processing failure, roll the batch back
}
System.out.println("empty too many times, exit");
} finally {
connector.disconnect();
}
}
/**
 * Print the entries the canal server parsed out of the binlog
 */
private static void printEntry(List<Entry> entrys) {
for (Entry entry : entrys) {
if (entry.getEntryType() == EntryType.TRANSACTIONBEGIN || entry.getEntryType() == EntryType.TRANSACTIONEND) {
//transaction begin/end entries, skip them
continue;
}
//a RowChange object holds everything about one row-level change:
//e.g. isDdl (whether it is a DDL change), sql (the DDL statement), beforeColumns/afterColumns (the row data before and after the change), etc.
RowChange rowChange;
try {
rowChange = RowChange.parseFrom(entry.getStoreValue());
} catch (Exception e) {
throw new RuntimeException("ERROR ## parser of eromanga-event has an error , data:" + entry.toString(), e);
}
//get the operation type: insert/update/delete
EventType eventType = rowChange.getEventType();
//print the binlog header information
System.out.println(String.format("================> binlog[%s:%s] , name[%s,%s] , eventType : %s", entry.getHeader().getLogfileName(), entry.getHeader().getLogfileOffset(), entry.getHeader().getSchemaName(), entry.getHeader().getTableName(), eventType));
//check whether this is a DDL statement
if (rowChange.getIsDdl()) {
System.out.println("================> isDdl: true, sql:" + rowChange.getSql());
}
//walk through every row in the RowChange and print it
for (RowData rowData : rowChange.getRowDatasList()) {
//if it is a delete statement
if (eventType == EventType.DELETE) {
printColumn(rowData.getBeforeColumnsList());
//if it is an insert statement
} else if (eventType == EventType.INSERT) {
printColumn(rowData.getAfterColumnsList());
//otherwise it is an update
} else {
//data before the change
System.out.println("-------> before");
printColumn(rowData.getBeforeColumnsList());
//data after the change
System.out.println("-------> after");
printColumn(rowData.getAfterColumnsList());
}
}
}
}
private static void printColumn(List<Column> columns) {
for (Column column : columns) {
System.out.println(column.getName() + " : " + column.getValue() + " update=" + column.getUpdated());
}
}
}
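Because the client above is a Spring @Component that starts its polling loop from afterPropertiesSet(), it needs a Spring context to run. A minimal bootstrap sketch (assuming plain Spring without Spring Boot; the class name here is made up):

```java
package com.yjx.flinktest.canal;

import org.springframework.context.annotation.AnnotationConfigApplicationContext;

public class CanalClientBootstrap {
    public static void main(String[] args) {
        // scanning the package registers CanalClient as a bean;
        // afterPropertiesSet() then runs the polling loop shown above
        new AnnotationConfigApplicationContext("com.yjx.flinktest.canal");
    }
}
```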
4. Uses
This started with a business need: a task center doing aggregated queries. To keep the engineering effort as low as possible, we decided to use middleware to synchronize the data. That unifies the change messages: instead of emitting a message in every action, we only watch the business tables, and the canal client alone pushes messages to the message middleware for consumption (a small sketch of this is given at the end of this section), which also gives reasonably strong consistency.
Canal also supports running multiple instances in a single server; when the same instance is deployed on several servers, only one of them runs at any moment, and a single instance only allows one client to consume it at a time.
Based on the above, canal can be used to:
- mirror data for backup
- update distributed caches and search indexes
- combine different business flows and add extra logic
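As a sketch of the task-center pattern described above, the client's row handling could push changes to the middleware instead of printing them. The ChangeProducer interface here is hypothetical (in practice it would wrap Kafka, RocketMQ, or whatever middleware is in use), and the method is meant to sit inside the CanalClient class above, next to printEntry:

```java
// Hypothetical adapter around the message middleware actually in use.
public interface ChangeProducer {
    void send(String topic, String payload);
}

// Sketch only: a drop-in alternative to printEntry() that forwards changes instead of printing them.
private static void publishEntry(List<Entry> entrys, ChangeProducer producer) throws Exception {
    for (Entry entry : entrys) {
        // skip transaction begin/end markers, exactly like printEntry does
        if (entry.getEntryType() == EntryType.TRANSACTIONBEGIN || entry.getEntryType() == EntryType.TRANSACTIONEND) {
            continue;
        }
        RowChange rowChange = RowChange.parseFrom(entry.getStoreValue());
        String table = entry.getHeader().getSchemaName() + "." + entry.getHeader().getTableName();
        // one message per changed row; the payload format is whatever the task-center consumer expects
        for (RowData rowData : rowChange.getRowDatasList()) {
            producer.send("task-center-changes", table + "|" + rowChange.getEventType() + "|" + rowData);
        }
    }
}
```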