本文已参与「新人创作礼」活动,一起开启掘金创作之路
- ETCD 是基于Raft算法实现强一致性的分布式KV存储系统,由CoreOS公司发起的一个开源项目,授权协议为Apache。
- ETCD部署到3台Master节点,名称和IP如下:
- master1:192.168.66.131
- master2:192.168.66.132
- master3:192.168.66.133
- 如果没有特殊说明,本文档的所有操作均在 master1 节点上执行
- 如果跨主机通讯方案选择flanneld,则需要将Etcd降级到v3.3.x版本
一、下载etcd二进制文件,并复制到其他两台Master节点
[root@master1 ~]# mkdir -p /opt/install/soft/etcd
[root@master1 ~]# cd /opt/install/soft/etcd
[root@master1 etcd]# wget https://github.com/etcd-io/etcd/releases/download/v3.5.2/etcd-v3.5.2-linux-amd64.tar.gz
[root@master1 etcd]# tar -xvf etcd-v3.5.2-linux-amd64.tar.gz
[root@master1 etcd]# ll etcd-v3.5.2-linux-amd64
总用量 56372
drwxr-xr-x 3 528287 89939 40 2月 1 19:36 Documentation
-rwxr-xr-x 1 528287 89939 23588864 2月 1 19:36 etcd
-rwxr-xr-x 1 528287 89939 17993728 2月 1 19:36 etcdctl
-rwxr-xr-x 1 528287 89939 16068608 2月 1 19:36 etcdutl
-rw-r--r-- 1 528287 89939 42066 2月 1 19:36 README-etcdctl.md
-rw-r--r-- 1 528287 89939 7359 2月 1 19:36 README-etcdutl.md
-rw-r--r-- 1 528287 89939 9394 2月 1 19:36 README.md
-rw-r--r-- 1 528287 89939 7896 2月 1 19:36 READMEv2-etcdctl.md
[root@master1 etcd]# for node_ip in "${MASTER_IPS[@]}"
do
echo ">>> ${node_ip}"
ssh root@${node_ip} "mkdir -p /opt/k8s/bin"
scp /opt/install/soft/etcd/etcd-v3.5.2-linux-amd64/etcd* root@${node_ip}:/opt/k8s/bin
ssh root@${node_ip} "chmod +x /opt/k8s/bin/*"
done
>>> 192.168.66.131
etcd 100% 22MB 215.1MB/s 00:00
etcdctl 100% 17MB 214.5MB/s 00:00
etcdutl 100% 15MB 219.5MB/s 00:00
>>> 192.168.66.132
etcd 100% 22MB 126.8MB/s 00:00
etcdctl 100% 17MB 120.2MB/s 00:00
etcdutl 100% 15MB 136.8MB/s 00:00
>>> 192.168.66.133
etcd 100% 22MB 158.1MB/s 00:00
etcdctl 100% 17MB 144.3MB/s 00:00
etcdutl 100% 15MB 152.8MB/s 00:00
[root@master1 etcd]#
二、配置etcd服务
1、准备服务模板
[root@master1 ~]# mkdir -p /opt/install/service
[root@master1 ~]# cd /opt/install/service
[root@master1 service]# cat > etcd.service.template <<EOF
[Unit]
Description=Etcd Server
After=network.target
After=network-online.target
Wants=network-online.target
Documentation=https://github.com/coreos
[Service]
Type=notify
WorkingDirectory=${ETCD_DATA_DIR}
ExecStart=/opt/k8s/bin/etcd \\
--data-dir=${ETCD_DATA_DIR} \\
--wal-dir=${ETCD_WAL_DIR} \\
--name=##NODE_NAME## \\
--cert-file=/opt/k8s/etcd/cert/etcd.pem \\
--key-file=/opt/k8s/etcd/cert/etcd-key.pem \\
--trusted-ca-file=/opt/k8s/etc/cert/ca.pem \\
--peer-cert-file=/opt/k8s/etcd/cert/etcd.pem \\
--peer-key-file=/opt/k8s/etcd/cert/etcd-key.pem \\
--peer-trusted-ca-file=/opt/k8s/etc/cert/ca.pem \\
--peer-client-cert-auth \\
--client-cert-auth \\
--listen-peer-urls=https://##NODE_IP##:2380 \\
--initial-advertise-peer-urls=https://##NODE_IP##:2380 \\
--listen-client-urls=https://##NODE_IP##:2379,http://127.0.0.1:2379 \\
--advertise-client-urls=https://##NODE_IP##:2379 \\
--initial-cluster-token=etcd-cluster-0 \\
--initial-cluster=${ETCD_NODES} \\
--initial-cluster-state=new \\
--auto-compaction-mode=periodic \\
--auto-compaction-retention=1 \\
--max-request-bytes=33554432 \\
--quota-backend-bytes=6442450944 \\
--heartbeat-interval=250 \\
--election-timeout=2000
Restart=on-failure
RestartSec=5
LimitNOFILE=65536
[Install]
WantedBy=multi-user.target
EOF
2、为每个节点生成服务配置文件
[root@master1 ~]# cd /opt/install/service
[root@master1 service]# for (( i=0; i < ${#MASTER_IPS[@]}; i++ ))
do
sed -e "s/##NODE_NAME##/${MASTER_NAMES[i]}/" -e "s/##NODE_IP##/${MASTER_IPS[i]}/" etcd.service.template > etcd-${MASTER_IPS[i]}.service
done
[root@master1 service]# ll etcd-*
-rw-r--r-- 1 root root 1412 4月 9 09:17 etcd-192.168.66.131.service
-rw-r--r-- 1 root root 1412 4月 9 09:17 etcd-192.168.66.132.service
-rw-r--r-- 1 root root 1412 4月 9 09:17 etcd-192.168.66.133.service
[root@master1 service]#
3、分发服务配置文件到3个Master节点
[root@master1 ~]# cd /opt/install/service
[root@master1 service]# for node_ip in "${MASTER_IPS[@]}"
do
echo ">>> ${node_ip}"
scp etcd-${node_ip}.service root@${node_ip}:/etc/systemd/system/etcd.service
done
>>> 192.168.66.131
etcd-192.168.66.131.service 100% 1412 1.3MB/s 00:00
>>> 192.168.66.132
etcd-192.168.66.132.service 100% 1412 484.5KB/s 00:00
>>> 192.168.66.133
etcd-192.168.66.133.service 100% 1412 443.3KB/s 00:00
[root@master1 service]#
4、启动Etcd服务
[root@master1 ~]# for node_ip in "${MASTER_IPS[@]}"
do
echo ">>> ${node_ip}"
ssh root@${node_ip} "mkdir -p ${ETCD_DATA_DIR} ${ETCD_WAL_DIR}"
ssh root@${node_ip} "systemctl daemon-reload && systemctl enable etcd && systemctl restart etcd "
done
>>> 192.168.66.131
>>> 192.168.66.132
>>> 192.168.66.133
[root@master1 ~]#
- 如果遇到错误,查看运行日志 journalctl -xe
- 请检查etcd数据目录和工作目录是否创建成功
三、检查etcd服务状态
1、检查etcd服务是否成功启动
[root@master1 ~]# for node_ip in "${MASTER_IPS[@]}"
do
echo ">>> ${node_ip}"
ssh root@${node_ip} "systemctl status etcd|grep Active"
done
>>> 192.168.66.131
Active: active (running) since 六 2022-04-09 09:26:45 CST; 1min 19s ago
>>> 192.168.66.132
Active: active (running) since 六 2022-04-09 09:26:45 CST; 1min 19s ago
>>> 192.168.66.133
Active: active (running) since 六 2022-04-09 09:26:46 CST; 1min 19s ago
[root@master1 ~]#
- 确保状态为active (running),否则查看日志:
[root@master1 ~]# journalctl -u etcd
[root@master1 ~]# journalctl -f -u etcd
[root@master1 ~]# journalctl -xe
2、检查etcd服务健康状况
[root@master1 ~]# for node_ip in "${MASTER_IPS[@]}"
do
echo ">>> ${node_ip}"
/opt/k8s/bin/etcdctl \
--endpoints=https://${node_ip}:2379 \
--cacert=/opt/k8s/etc/cert/ca.pem \
--cert=/opt/k8s/etcd/cert/etcd.pem \
--key=/opt/k8s/etcd/cert/etcd-key.pem endpoint health
done
>>> 192.168.66.131
https://192.168.66.131:2379 is healthy: successfully committed proposal: took = 7.961316ms
>>> 192.168.66.132
https://192.168.66.132:2379 is healthy: successfully committed proposal: took = 7.267523ms
>>> 192.168.66.133
https://192.168.66.133:2379 is healthy: successfully committed proposal: took = 7.439153ms
[root@master1 ~]#
3、查看当前Leader节点
[root@master1 ~]# /opt/k8s/bin/etcdctl \
-w table --cacert=/opt/k8s/etc/cert/ca.pem \
--cert=/opt/k8s/etcd/cert/etcd.pem \
--key=/opt/k8s/etcd/cert/etcd-key.pem \
--endpoints=${ETCD_ENDPOINTS} endpoint status
+-----------------------------+------------------+---------+---------+-----------+------------+-----------+------------+--------------------+--------+
| ENDPOINT | ID | VERSION | DB SIZE | IS LEADER | IS LEARNER | RAFT TERM | RAFT INDEX | RAFT APPLIED INDEX | ERRORS |
+-----------------------------+------------------+---------+---------+-----------+------------+-----------+------------+--------------------+--------+
| https://192.168.66.131:2379 | 7d29ae41bb99f529 | 3.5.2 | 20 kB | false | false | 2 | 12 | 12 | |
| https://192.168.66.132:2379 | d6b8df73604cb565 | 3.5.2 | 20 kB | true | false | 2 | 12 | 12 | |
| https://192.168.66.133:2379 | a94bb46803f3eba2 | 3.5.2 | 20 kB | false | false | 2 | 12 | 12 | |
+-----------------------------+------------------+---------+---------+-----------+------------+-----------+------------+--------------------+--------+
- 从上面的输出可以看出,当前的leader为192.168.66.132
四、etcdctl常用命令
# 存入
etcdctl put key value
# 读取
etcdctl get key
# 区间查找
etcdctl get key1 key2
# 只读取key的值
etcdctl get --print-value-only key
# 读取key开头的数据
etcdctl get --prefix key
# 读取所有key
etcdctl get / --prefix --keys-only
# 从key开始读取后面的数据
etcdctl get --from-key key
# 查找所有的key-value
etcdctl get --from-key ""
# 删除
etcdctl del key
etcdctl del --prev-kv key
etcdctl del --prev-kv --from-key key
etcdctl del --prev-kv --prefix key
# 删除所有数据
etcdctl del --prefix ""
五、etcd备份和恢复
# 备份快照:可以定期执行
etcdctl snapshot save /data/backup/xxxx.db
# 恢复步骤
1、关闭所有Master节点的 Etcd服务 systemctl stop etcd
2、备份 ETCD 存储目录下数据
3、拷贝 ETCD 备份快照到每个Etcd节点
4、在每个节点执行恢复命令(注意:--name、--initial-advertise-peer-urls 和 --data-dir 必须与该节点自身的实际配置保持一致,下面仅为示例)
ETCDCTL_API=3 etcdctl snapshot restore /data/backup/etcd-snapshot-xxxxx.db \
--name etcd-0 \
--initial-cluster "master1=https://192.168.66.131:2380,master2=https://192.168.66.132:2380,master3=https://192.168.66.133:2380" \
--initial-cluster-token etcd-cluster \
--initial-advertise-peer-urls https://192.168.66.131:2380 \
--data-dir=/var/lib/etcd/default.etcd
- 参考 zhuanlan.zhihu.com/p/101523337
- 集群的状态数据都存储在Etcd中,及时备份,完善的恢复流程和脚本很重要
- 先用起来,通过操作实践认识kubernetes(k8s),积累多了自然就理解了
- 把理解的知识分享出来,自造福田,自得福缘
- 追求简单,容易使人理解,知识的上下文也是知识的一部分,例如版本,时间等
- 欢迎留言交流,也可以提出问题,一般在周末回复和完善文档
- Jason@vip.qq.com 2022-4-8