Cloud Service Migration (JD Cloud to Yunding Cloud), Day 1
1. Redis Deployment
cd /usr/local/disk_vdb/redis-7.0.0/
apt update
apt install make gcc pkg-config
make
make install
redis-server /usr/local/disk_vdb/redis-7.0.0/redis.conf
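A quick check that Redis is up (a minimal sketch, assuming redis.conf keeps the default port 6379 and no requirepass; adjust the flags if your config differs):
redis-cli -p 6379 ping                          # expect PONG
redis-cli -p 6379 info server | grep redis_version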
2. Kafka Deployment
root@YD-CN-Service-15963:/usr/local/disk_vdb# cp /root/software/kafka.tar.gz .
root@YD-CN-Service-15963:/usr/local/disk_vdb# ls
docker kafka.tar.gz redis-7.0.0
root@YD-CN-Service-15963:/usr/local/disk_vdb# tar zxvf kafka.tar.gz
root@YD-CN-Service-15963:/usr/local/disk_vdb# mkdir -p /usr/local/disk_vdb/kafka_2.12-3.5.0/logs/{kraft-broker-logs,kraft-controller-logs}
server.properties configuration
root@YD-CN-Service-15963:/usr/local/disk_vdb/kafka_2.12-3.5.0# cat config/kraft/server.properties
############################# Server Basics #############################
process.roles=broker,controller
node.id=1
controller.quorum.voters=1@IP:9093
############################# Socket Server Settings #############################
listeners=BROKER://:30004,CONTROLLER://:9093
advertised.listeners=BROKER://IP:30004
# (the line below is a legacy, misspelled property; advertised.listeners above already covers it)
# advertised.hosts.name=IP
inter.broker.listener.name=BROKER
controller.listener.names=CONTROLLER
listener.security.protocol.map=BROKER:PLAINTEXT,CONTROLLER:PLAINTEXT
num.network.threads=3
num.io.threads=8
socket.send.buffer.bytes=102400
socket.receive.buffer.bytes=102400
socket.request.max.bytes=104857600
############################# Log Basics #############################
log.dirs=/usr/local/disk_vdb/kafka_2.12-3.5.0/data
num.partitions=1
num.recovery.threads.per.data.dir=1
offsets.topic.replication.factor=1
transaction.state.log.replication.factor=1
transaction.state.log.min.isr=1
############################# Log Flush Policy #############################
############################# Log Retention Policy #############################
log.retention.hours=168
log.segment.bytes=1073741824
log.retention.check.interval.ms=300000
broker.properties configuration
log.dirs=/usr/local/disk_vdb/kafka_2.12-3.5.0/logs/kraft-broker-logs
controller.properties configuration
log.dirs=/usr/local/disk_vdb/kafka_2.12-3.5.0/logs/kraft-controller-logs
Start Kafka
root@YD-CN-Service-15963:/usr/local/disk_vdb/kafka_2.12-3.5.0# cat kafka-start.sh
#!/bin/bash
. /etc/profile
KAFKA_PATH="/usr/local/disk_vdb/kafka_2.12-3.5.0"
echo ${KAFKA_PATH}
echo 'restart kafka'
pid=$(ps -ef|grep kafka|grep -w ${KAFKA_PATH}|grep -v 'grep'|awk '{print $2}')
if [ -n "$pid" ]
then
    echo "kafka is running, killing $pid"
    kill -9 $pid
fi
sleep 3
# NOTE: the storage format only takes effect on the very first run; kafka-storage.sh refuses to
# re-format a data directory that already contains meta.properties, so later restarts keep the
# existing cluster ID.
rand=$(${KAFKA_PATH}/bin/kafka-storage.sh random-uuid)
echo $rand
${KAFKA_PATH}/bin/kafka-storage.sh format -t $rand -c ${KAFKA_PATH}/config/kraft/server.properties
nohup ${KAFKA_PATH}/bin/kafka-server-start.sh ${KAFKA_PATH}/config/kraft/server.properties >/dev/null 2> ${KAFKA_PATH}/logs/kafka.log &
echo 'restart kafka finish'
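A quick smoke test after the start script runs (a minimal sketch, assuming the BROKER listener on port 30004 configured above is reachable; "migration-test" is just a throwaway topic name):
KAFKA_PATH="/usr/local/disk_vdb/kafka_2.12-3.5.0"
# create a throwaway topic, confirm it is listed, then remove it
${KAFKA_PATH}/bin/kafka-topics.sh --bootstrap-server IP:30004 --create --topic migration-test --partitions 1 --replication-factor 1
${KAFKA_PATH}/bin/kafka-topics.sh --bootstrap-server IP:30004 --list
${KAFKA_PATH}/bin/kafka-topics.sh --bootstrap-server IP:30004 --delete --topic migration-test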
3. Install acme.sh for Automatic Certificate Issuance
./acme.sh --install -m <email>
# a cron job is installed automatically to renew certificates on schedule
root@YD-CN-Service-15963:~/software/acme.sh# crontab -l
9 2 * * * "/root/.acme.sh"/acme.sh --cron --home "/root/.acme.sh" > /dev/null
# Issue the certificate via DNS API keys (JD Cloud)
# JD_ACCESS_KEY_ID and JD_ACCESS_KEY_SECRET are saved to ~/.acme.sh/account.conf and picked up automatically later; they only need to be exported once.
export JD_ACCESS_KEY_ID="JDC_xxxxxxx"
export JD_ACCESS_KEY_SECRET="xxxxxxxxx"
# Enter the acme.sh directory and run the issue command
root@YD-CN-Service-15963:~# cd .acme.sh/
root@YD-CN-Service-15963:~/.acme.sh# ls
account.conf acme.sh acme.sh.env deploy dnsapi http.header notify
root@YD-CN-Service-15963:~/.acme.sh# ./acme.sh --issue --dns dns_jd -d xxx.com -d *.xxx.com
[Mon Aug 11 11:24:34 AM CST 2025] Using CA: https://acme.zerossl.com/v2/DV90
..... (process output omitted here)
-----END CERTIFICATE-----
..... (these are the locations of your certificate files: xxx.com.key and fullchain.cer)
[Mon Aug 11 11:26:22 AM CST 2025] Your cert is in: /root/.acme.sh/xxx.com_ecc/xxx.com.cer
[Mon Aug 11 11:26:22 AM CST 2025] Your cert key is in: /root/.acme.sh/xxx.com_ecc/xxx.com.key
[Mon Aug 11 11:26:22 AM CST 2025] The intermediate CA cert is in: /root/.acme.sh/xxx.com_ecc/ca.cer
[Mon Aug 11 11:26:22 AM CST 2025] And the full-chain cert is in: /root/.acme.sh/xxx.com_ecc/fullchain.cer
# Symlink the certificates into the directory nginx expects
ln -s /root/.acme.sh/xxx.com_ecc/xxx.com.key /usr/local/nginx/ssl/xxx.com.key
ln -s /root/.acme.sh/xxx.com_ecc/fullchain.cer /usr/local/nginx/ssl/fullchain.cer
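# Symlinks work, but acme.sh also offers --install-cert, which copies the files to the target
# paths and runs a reload command after every renewal. A sketch using the same paths as above
# (assumes nginx runs on this host; --ecc because the cert above was issued into xxx.com_ecc):
./acme.sh --install-cert -d xxx.com --ecc \
  --key-file       /usr/local/nginx/ssl/xxx.com.key \
  --fullchain-file /usr/local/nginx/ssl/fullchain.cer \
  --reloadcmd      "nginx -s reload"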
# Create a test nginx config to verify the certificate
root@YD-CN-Service-15963:~# cd /usr/local/nginx/conf/conf.d/
root@YD-CN-Service-15963:/usr/local/nginx/conf/conf.d# vi example443.conf
server {
    listen 443 ssl http2;
    server_name example.xxx.com;

    ssl_certificate /usr/local/nginx/ssl/fullchain.cer;
    ssl_certificate_key /usr/local/nginx/ssl/xxx.com.key;
    ssl_protocols TLSv1.2 TLSv1.3;       # allowed TLS protocol versions
    ssl_ciphers 'HIGH:!aNULL:!MD5';      # allowed cipher suites
    ssl_prefer_server_ciphers on;

    location / {
        root /usr/local/nginx/html;
        index index.html index.htm;
    }
}
root@YD-CN-Service-15963:/usr/local/nginx/conf/conf.d# nginx -t
nginx: the configuration file /usr/local/nginx/conf/nginx.conf syntax is ok
nginx: configuration file /usr/local/nginx/conf/nginx.conf test is successful
root@YD-CN-Service-15963:/usr/local/nginx/conf/conf.d# nginx -s reload
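# To confirm nginx is actually serving the new certificate, check the SNI vhost from the server
# itself (a sketch; swap the name if you test another domain):
echo | openssl s_client -connect 127.0.0.1:443 -servername example.xxx.com 2>/dev/null | openssl x509 -noout -subject -issuer -dates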
4. Database Migration
Two MySQL servers on JD Cloud need to be migrated to Yunding Cloud and their data merged onto a single instance, so a multi-source setup (two masters, one replica) is used; the data still has to be kept in sync right up until the database cutover.
# Backup command on the JD Cloud side. I use mydumper here because it backs up with multiple threads and records the binlog position at backup time, so there is no need to look up the master's binlog coordinates by hand as before. The same backup command is used on both JD Cloud MySQL servers.
#!/usr/bin/bash
mydumper -u root -p <password> -h 172.30.0.11 -P 3306 --regex '^(db1\.|db2\.|db3\.|db4\.)' -v 3 -L /usr/local/disk_vdb1/myd.log -t 2 -o /usr/local/disk_vdb1/bak_DB
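# mydumper also writes a "metadata" file into the output directory with the source's binlog
# coordinates at dump time (field layout varies by mydumper version); the Log/Pos values there
# are what feed MASTER_LOG_FILE / MASTER_LOG_POS in the CHANGE MASTER statements below:
grep -iE 'log|pos' /usr/local/disk_vdb1/bak_DB/metadata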
# Restore command on the Yunding Cloud side
#!/usr/bin/bash
myloader -u root -P 3306 -p <password> -S /usr/local/mysql/mysql.sock -t 3 -v 3 -L /usr/local/disk_vdb/myd.log -o -d /usr/local/disk_vdb/zzcbak
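# A rough sanity check after the restore: confirm the expected schemas and table counts landed
# (a sketch; it only counts tables, it does not compare data against the source):
mysql -uroot -p -S /usr/local/mysql/mysql.sock -e "SELECT table_schema, COUNT(*) AS tables FROM information_schema.tables GROUP BY table_schema;"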
# After the restore, configure multi-source replication (two masters, one replica). I got lazy and just used the company root account; the slave-account creation below is only for reference.
-------------------------------reference start-------------------------------
# JD Cloud master A (create a replication account)
CREATE USER 'slave1'@'%' IDENTIFIED BY '111aaaAAA!';
GRANT REPLICATION SLAVE ON *.* TO 'slave1'@'%';
FLUSH PRIVILEGES;
# JD Cloud master B (create a replication account)
CREATE USER 'slave2'@'%' IDENTIFIED BY '111aaaAAA!';
GRANT REPLICATION SLAVE ON *.* TO 'slave2'@'%';
FLUSH PRIVILEGES;
-------------------------------reference end-------------------------------
# On the Yunding Cloud replica, configure replication channels from the two JD Cloud masters (multi-source replication needs master_info_repository=TABLE and relay_log_info_repository=TABLE, which is the default on MySQL 8.0)
CHANGE MASTER TO
    MASTER_HOST='ip1',
    MASTER_PORT=33001,
    MASTER_USER='root',
    MASTER_LOG_FILE='mysql-binlog.000175',
    MASTER_LOG_POS=47999145,
    MASTER_PASSWORD='<password>'
    FOR CHANNEL '17';
CHANGE MASTER TO
    MASTER_HOST='ip2',
    MASTER_PORT=3306,
    MASTER_USER='root',
    MASTER_LOG_FILE='mysql-binlog.000175',
    MASTER_LOG_POS=47999145,
    MASTER_PASSWORD='<password>'
    FOR CHANNEL '28';
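# Optional sketch: since two masters are merged onto one replica, per-channel replication filters
# can keep each channel limited to its own schemas and avoid accidental overlap
# (CHANGE REPLICATION FILTER ... FOR CHANNEL needs MySQL 8.0+; 'db1'/'db2' are hypothetical names):
CHANGE REPLICATION FILTER REPLICATE_WILD_DO_TABLE = ('db1.%') FOR CHANNEL '17';
CHANGE REPLICATION FILTER REPLICATE_WILD_DO_TABLE = ('db2.%') FOR CHANNEL '28';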
# Start the channels
START SLAVE FOR CHANNEL '17';
START SLAVE FOR CHANNEL '28';
# Check and stop a channel (before the cutover, both channels should show Slave_IO_Running / Slave_SQL_Running = Yes and Seconds_Behind_Master at 0)
SHOW SLAVE STATUS FOR CHANNEL '17';
STOP SLAVE FOR CHANNEL '17';