k8s NFS high-availability file system
- Prepare two file servers
- For convenience, these are built on the k8s master01 and master02 nodes
- master01 192.168.168.201 keepalived nfs rsync inotify-tools
- master02 192.168.168.202 keepalived nfs rsync inotify-tools
- Install NFS on both nodes
# Install NFS
yum -y install nfs-utils rpcbind
# Create the shared directory
mkdir -p /data/k8s
chmod -R 777 /data/k8s
# Edit the exports file (rw = read-write, sync = synchronous writes, no_root_squash = do not map root to nobody)
vim /etc/exports
/data/k8s 192.168.168.0/24(rw,sync,no_root_squash)
# Re-export the shares
exportfs -r
# Verify the export list
exportfs
# Start the rpcbind and nfs services
systemctl restart rpcbind && systemctl enable rpcbind
systemctl restart nfs && systemctl enable nfs
# Check RPC service registration
rpcinfo -p localhost
# Or try mounting the NFS share manually from any k8s node to confirm it works
mkdir /haha
mount -t nfs 192.168.168.201:/data/k8s /haha
# Check the mount
df -h
# Unmount
umount /haha
rm -rf /haha
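A quick way to confirm the export is visible from a client without mounting anything; a minimal check, assuming nfs-utils is installed on the node:
# List the exports published by the NFS server, from any k8s node
showmount -e 192.168.168.201
# The output should include: /data/k8s 192.168.168.0/24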
- Configure keepalived
- Install keepalived
yum install -y keepalived
- Start keepalived
systemctl start keepalived && systemctl enable keepalived
- master01 configuration
mv /etc/keepalived/keepalived.conf /etc/keepalived/keepalived.conf.bak
vim /etc/keepalived/keepalived.conf

global_defs {
   router_id master01
}
vrrp_script check_nfs {
    script "killall -0 nfsd"
    interval 2
    timeout 1
    fall 2
}
vrrp_instance nfs {
    state MASTER              # act as the primary server
    interface eth0            # network interface to monitor
    virtual_router_id 199     # must be the same on master and backup
    priority 150              # master and backup use different priorities; the master gets the larger value, and a larger value means higher priority
    advert_int 1              # VRRP multicast advertisement interval in seconds
    nopreempt
    authentication {
        auth_type PASS        # VRRP authentication type, must match on both nodes
        auth_pass 1111        # password
    }
    virtual_ipaddress {
        192.168.168.199/24    # VRRP HA virtual IP (VIP)
    }
    track_script {
        check_nfs
    }
}
- master02 configuration
mv /etc/keepalived/keepalived.conf /etc/keepalived/keepalived.conf.bak
vim /etc/keepalived/keepalived.conf

global_defs {
   router_id master02
}
vrrp_script check_nfs {
    script "killall -0 nfsd"
    interval 2
    timeout 1
    fall 2
}
vrrp_instance VI_1 {
    state BACKUP              # act as the backup server
    interface eth0            # network interface to monitor
    virtual_router_id 199     # must be the same on master and backup
    priority 100              # master and backup use different priorities; the master gets the larger value, and a larger value means higher priority
    advert_int 1              # VRRP multicast advertisement interval in seconds
    authentication {
        auth_type PASS        # VRRP authentication type, must match on both nodes
        auth_pass 1111        # password
    }
    virtual_ipaddress {
        192.168.168.199/24    # VRRP HA virtual IP (VIP)
    }
    track_script {
        check_nfs
    }
}
After the configuration is done, restart keepalived on both nodes
systemctl restart keepalived
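After both nodes are configured, it is worth checking where the VIP landed and whether it moves when NFS dies; a rough sketch, assuming the interface is eth0 as in the configs above:
# On master01 the VIP should normally be present
ip addr show eth0 | grep 192.168.168.199
# Simulate an NFS failure on master01 so that check_nfs ("killall -0 nfsd") starts failing
systemctl stop nfs
# A few seconds later the VIP should have moved; check on master02
ip addr show eth0 | grep 192.168.168.199
# Restore the service on master01 afterwards
systemctl start nfs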
- Install and deploy Rsync + Inotify
- Install rsync and inotify
# Install the EPEL repository first, otherwise inotify-tools cannot be found
yum install -y epel-release
yum -y install rsync inotify-tools
- Configure rsyncd on master
vim /etc/rsyncd.conf
Note: in /etc/rsyncd.conf, comments must not be written on the same line after a configuration item, otherwise file synchronization will fail
uid = root
gid = root
use chroot = 0
port = 873
# allowed client addresses; a single IP or an IP range can be specified
hosts allow = 192.168.168.0/24
max connections = 0
timeout = 300
pid file = /var/run/rsyncd.pid
lock file = /var/run/rsyncd.lock
log file = /var/log/rsyncd.log
log format = %t %a %m %f %b
transfer logging = yes
syslog facility = local3

[master_web]
path = /data/k8s
comment = master_web
ignore errors
# whether clients are allowed to upload files
read only = no
list = no
# space- or comma-separated list of user names; only these users may connect to this module
auth users = rsync
# file holding the user name and password; must be created manually
secrets file = /etc/rsyncd.passwd
- Edit the user/password file (format: "username:password")
vim /etc/rsyncd.passwd
rsync:123456
- Edit the sync password file (the rsyncd password of the backup machine)
mkdir -p /opt/rsyncd
vim /opt/rsyncd/rsyncd.passwd
123456
- Set file permissions
chmod 600 /etc/rsyncd.passwd
chmod 600 /opt/rsyncd/rsyncd.passwd
- Start the service
systemctl enable rsyncd && systemctl restart rsyncd
- Configure rsyncd on backup
vim /etc/rsyncd.conf
Note: in /etc/rsyncd.conf, comments must not be written on the same line after a configuration item, otherwise file synchronization will fail
uid = root
gid = root
use chroot = 0
port = 873
# allowed client addresses; a single IP or an IP range can be specified
hosts allow = 192.168.168.0/24
max connections = 0
timeout = 300
pid file = /var/run/rsyncd.pid
lock file = /var/run/rsyncd.lock
log file = /var/log/rsyncd.log
log format = %t %a %m %f %b
transfer logging = yes
syslog facility = local3

[slave_web]
path = /data/k8s
comment = slave_web
ignore errors
# whether clients are allowed to upload files
read only = no
list = no
# space- or comma-separated list of user names; only these users may connect to this module
auth users = rsync
# file holding the user name and password; must be created manually
secrets file = /etc/rsyncd.passwd
- Edit the user/password file (format: "username:password")
vim /etc/rsyncd.passwd
rsync:123456
- Edit the sync password file (the rsyncd password of the master machine)
mkdir -p /opt/rsyncd
vim /opt/rsyncd/rsyncd.passwd
123456
- Set file permissions
chmod 600 /etc/rsyncd.passwd
chmod 600 /opt/rsyncd/rsyncd.passwd
- Start the service
systemctl enable rsyncd && systemctl restart rsyncd
- Manually verify file synchronization
# On master, create folders and files
touch /data/k8s/{a,b}
mkdir /data/k8s/test
# Sync manually to the backup node
rsync -avzp --delete /data/k8s/ rsync@192.168.168.202::slave_web --password-file=/opt/rsyncd/rsyncd.passwd
# Check on the backup node
ls /data/k8s
a  b  test
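The backup node will also have to push data back to the master_web module after a failover, so the reverse direction can be spot-checked in the same way (run on the backup node):
# On backup, push /data/k8s to the master_web module on master01
rsync -avzp --delete /data/k8s/ rsync@192.168.168.201::master_web --password-file=/opt/rsyncd/rsyncd.passwd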
- Set up automatic Rsync + Inotify synchronization
Note: the master and backup nodes must not both run the rsync auto-sync at the same time, i.e. two-way synchronization must not be enabled. If the master syncs data to the backup and the backup then syncs that data back to the master, the two directions conflict. Only one side may auto-sync to the other at any given time. The approach is to check whether the VIP is present on the current node: if it is, automatically sync data to the other node; if it is not, do not run the auto-sync.
- master configuration
Write the auto-sync script
vim /opt/rsyncd/rsync_inotify.sh

#!/bin/bash
host=192.168.168.202
src=/data/k8s/
des=slave_web
password=/opt/rsyncd/rsyncd.passwd
user=rsync
inotifywait=/usr/bin/inotifywait

$inotifywait -mrq --timefmt '%Y%m%d %H:%M' --format '%T %w%f%e' -e modify,delete,create,attrib $src \
| while read files ;do
    rsync -avzP --delete --timeout=100 --password-file=${password} $src $user@$host::$des
    echo "${files} was rsynced" >> /tmp/rsync.log 2>&1
done
Write the VIP monitoring script
vim /opt/rsyncd/vip_monitor.sh

#!/bin/bash
VIP_NUM=`ip addr|grep 192.168.168.199|wc -l`
RSYNC_INOTIFY_NUM=`ps -ef|grep /usr/bin/inotifywait|grep -v grep|wc -l`
if [ ${VIP_NUM} -ne 0 ];then
    echo "the VIP is on this NFS node" >/dev/null 2>&1
    if [ ${RSYNC_INOTIFY_NUM} -ne 0 ];then
        echo "rsync_inotify.sh is already running in the background" >/dev/null 2>&1
    else
        echo "rsync_inotify.sh needs to be started in the background" >/dev/null 2>&1
        nohup sh /opt/rsyncd/rsync_inotify.sh &
    fi
else
    echo "the VIP is not on this NFS node" >/dev/null 2>&1
    if [ ${RSYNC_INOTIFY_NUM} -ne 0 ];then
        echo "the background rsync_inotify.sh needs to be stopped" >/dev/null 2>&1
        ps -ef|grep rsync_inotify.sh|grep -v grep|awk '{print $2}'|xargs kill -9
        ps -ef|grep inotifywait|grep -v grep|awk '{print $2}'|xargs kill -9
    else
        echo "rsync_inotify.sh is not currently running" >/dev/null 2>&1
    fi
fi
Write the script that keeps the check running continuously
vim /opt/rsyncd/rsync_monit.sh

#!/bin/bash
while [ "1" = "1" ]
do
    /bin/bash -x /opt/rsyncd/vip_monitor.sh >/dev/null 2>&1
    sleep 5    # pause between checks so the loop does not spin at full speed
done
Make the scripts executable
chmod +x /opt/rsyncd/*.sh
Run the script in the background
nohup sh /opt/rsyncd/rsync_monit.sh &
Add it to the boot startup
chmod +x /etc/rc.d/rc.local
echo "nohup sh /opt/rsyncd/rsync_monit.sh & " >> /etc/rc.d/rc.local
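Since the VIP normally sits on master01, the monitor loop there should have started inotifywait by itself; a quick check (process names as defined above):
# Both the monitor loop and inotifywait should show up on the node that holds the VIP
ps -ef | grep -E 'rsync_monit|rsync_inotify|inotifywait' | grep -v grep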
- backup configuration
Write the auto-sync script
vim /opt/rsyncd/rsync_inotify.sh

#!/bin/bash
host=192.168.168.201
src=/data/k8s/
des=master_web
password=/opt/rsyncd/rsyncd.passwd
user=rsync
inotifywait=/usr/bin/inotifywait

$inotifywait -mrq --timefmt '%Y%m%d %H:%M' --format '%T %w%f%e' -e modify,delete,create,attrib $src \
| while read files ;do
    rsync -avzP --delete --timeout=100 --password-file=${password} $src $user@$host::$des
    echo "${files} was rsynced" >> /tmp/rsync.log 2>&1
done
Write the VIP monitoring script
vim /opt/rsyncd/vip_monitor.sh

#!/bin/bash
VIP_NUM=`ip addr|grep 192.168.168.199|wc -l`
RSYNC_INOTIFY_NUM=`ps -ef|grep /usr/bin/inotifywait|grep -v grep|wc -l`
if [ ${VIP_NUM} -ne 0 ];then
    echo "the VIP is on this NFS node" >/dev/null 2>&1
    if [ ${RSYNC_INOTIFY_NUM} -ne 0 ];then
        echo "rsync_inotify.sh is already running in the background" >/dev/null 2>&1
    else
        echo "rsync_inotify.sh needs to be started in the background" >/dev/null 2>&1
        nohup sh /opt/rsyncd/rsync_inotify.sh &
    fi
else
    echo "the VIP is not on this NFS node" >/dev/null 2>&1
    if [ ${RSYNC_INOTIFY_NUM} -ne 0 ];then
        echo "the background rsync_inotify.sh needs to be stopped" >/dev/null 2>&1
        ps -ef|grep rsync_inotify.sh|grep -v grep|awk '{print $2}'|xargs kill -9
        ps -ef|grep inotifywait|grep -v grep|awk '{print $2}'|xargs kill -9
    else
        echo "rsync_inotify.sh is not currently running" >/dev/null 2>&1
    fi
fi
Write the script that keeps the check running continuously
vim /opt/rsyncd/rsync_monit.sh

#!/bin/bash
while [ "1" = "1" ]
do
    /bin/bash -x /opt/rsyncd/vip_monitor.sh >/dev/null 2>&1
    sleep 5    # pause between checks so the loop does not spin at full speed
done
Make the scripts executable
chmod +x /opt/rsyncd/*.sh
Run the script in the background
nohup sh /opt/rsyncd/rsync_monit.sh &
Add it to the boot startup
chmod +x /etc/rc.d/rc.local
echo "nohup sh /opt/rsyncd/rsync_monit.sh & " >> /etc/rc.d/rc.local
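With both nodes configured, a rough end-to-end failover test could look like the following; the test file names are only illustrative, and the exact timing depends on keepalived and inotify latency:
# 1. On the node holding the VIP (normally master01), create a test file
touch /data/k8s/failover_test
# 2. On the other node, confirm it arrived
ls /data/k8s/failover_test
# 3. Stop keepalived on master01 so the VIP moves to master02
systemctl stop keepalived
# 4. On master02, confirm the VIP is present and inotifywait has been started
ip addr show eth0 | grep 192.168.168.199
ps -ef | grep inotifywait | grep -v grep
# 5. Create a file on master02 and check that it syncs back to master01
touch /data/k8s/failback_test
# 6. Start keepalived on master01 again when done
systemctl start keepalived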
- Test: mount a MySQL database through a PV and PVC
- pv-nfs.yaml
mkdir /data/k8s/pv001
vim pv-nfs.yaml

apiVersion: v1
kind: PersistentVolume
metadata:
  name: pv001
spec:
  capacity:
    storage: 1Gi
  volumeMode: Filesystem
  accessModes:
    - ReadWriteOnce
  persistentVolumeReclaimPolicy: Retain
  storageClassName: nfs
  nfs:
    path: /data/k8s/pv001
    server: 192.168.168.199
- pvc-nfs.yaml
vim pvc-nfs.yaml

apiVersion: v1
kind: PersistentVolumeClaim
metadata:
  name: pvc001
spec:
  accessModes:
    - ReadWriteOnce
  resources:
    requests:
      storage: 1Gi
  storageClassName: nfs
- Create the PV and PVC
kubectl apply -f pv-nfs.yaml
kubectl apply -f pvc-nfs.yaml
- mysql-secret.yaml
vim mysql-secret.yaml
The secret value must be base64-encoded
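The value used below, cXdlMTIz, is just the base64 encoding of the plaintext password; it can be generated and checked like this:
# Encode the plaintext password (-n avoids a trailing newline in the encoded value)
echo -n "qwe123" | base64
# cXdlMTIz
# Decode to double-check
echo "cXdlMTIz" | base64 -d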
apiVersion: v1
kind: Secret
metadata:
  name: mysql-pass
type: Opaque
data:
  password: cXdlMTIz
- mysql-deployment.yaml
vim mysql-deployment.yaml

apiVersion: v1
kind: Service
metadata:
  name: mysql
  labels:
    app: mysql
spec:
  type: LoadBalancer
  ports:
    - port: 3306
      targetPort: 3306
  selector:
    app: mysql
    tier: mysql
---
apiVersion: apps/v1
kind: Deployment
metadata:
  name: mysql
  labels:
    app: mysql
spec:
  selector:
    matchLabels:
      app: mysql
      tier: mysql
  strategy:
    type: Recreate
  template:
    metadata:
      labels:
        app: mysql
        tier: mysql
    spec:
      containers:
        - image: mysql:5.6
          name: mysql
          env:
            - name: MYSQL_ROOT_PASSWORD
              valueFrom:
                secretKeyRef:
                  name: mysql-pass
                  key: password
          ports:
            - containerPort: 3306
              name: mysql
          volumeMounts:
            - name: mysql-persistent-storage
              mountPath: /var/lib/mysql
      volumes:
        - name: mysql-persistent-storage
          persistentVolumeClaim:
            claimName: pvc001

kubectl apply -f mysql-secret.yaml
kubectl apply -f mysql-deployment.yaml
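Finally, a short verification sketch; resource names are as defined above, and the one-off client pod is only illustrative:
# The PV and PVC should be Bound and the MySQL pod Running
kubectl get pv,pvc
kubectl get pods -l app=mysql
# The MySQL data files should appear under the NFS export on the node holding the VIP
ls /data/k8s/pv001
# Optionally connect through the service to confirm the password from the Secret works
kubectl run mysql-client --image=mysql:5.6 -it --rm --restart=Never -- mysql -h mysql -pqwe123 -e "show databases;"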