Installing a Kubernetes Cluster with sealer


I. Resource Preparation

  1. Prepare the machines
    • Note: all servers must use the same SSH username and password (a quick connectivity check is sketched after the table below).
    • From the portal139 server, install master140, master141, slave142, slave143, slave144, and slave145 as a two-master, four-worker Kubernetes cluster.
| Machine | IP Address | Login Username / Password | Configuration |
| --- | --- | --- | --- |
| xincan-sealer-portal139-10.1.90.139-long | 10.1.90.139 | ************ | 8 cores, 16 GB RAM, 100 GB disk |
| xincan-sealer-master140-10.1.90.140-long | 10.1.90.140 | ************ | 8 cores, 16 GB RAM, 100 GB disk |
| xincan-sealer-master141-10.1.90.141-long | 10.1.90.141 | ************ | 8 cores, 16 GB RAM, 100 GB disk |
| xincan-sealer-slave142-10.1.90.142-long | 10.1.90.142 | ************ | 8 cores, 16 GB RAM, 100 GB disk |
| xincan-sealer-slave143-10.1.90.143-long | 10.1.90.143 | ************ | 8 cores, 16 GB RAM, 100 GB disk |
| xincan-sealer-slave144-10.1.90.144-long | 10.1.90.144 | ************ | 8 cores, 16 GB RAM, 100 GB disk |
| xincan-sealer-slave145-10.1.90.145-long | 10.1.90.145 | ************ | 8 cores, 16 GB RAM, 100 GB disk |
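    • Before writing the Clusterfile, it can be worth confirming that portal139 can reach every node over SSH with the shared credentials; the sketch below assumes sshpass is installed on portal139 and uses '<password>' as a placeholder.
# quick reachability check from portal139 (sshpass and the '<password>' placeholder are assumptions)
[root@portal139 ~]# for ip in 10.1.90.14{0..5}; do sshpass -p '<password>' ssh -o StrictHostKeyChecking=no root@$ip hostname; done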
  2. Write the sealer cluster definition; the file is named Clusterfile
[root@portal139 sealer]# cat Clusterfile
apiVersion: sealer.aliyun.com/v1alpha1
kind: Cluster
metadata:
  name: hatech-cluster
spec:
  image: dev.hb.hatech.com.cn/library/kubernetes:v1.19.9
  provider: BAREMETAL
  ssh:
    # SSH login password; not needed if key-based login is used
    passwd: hatech1618
    # absolute path of the SSH private key, e.g. /root/.ssh/id_rsa
    pk: /root/.ssh/id_rsa
    # passphrase of the SSH private key; set to "" if there is none
    pkPasswd: ""
    # SSH login user
    user: root
  network:
    # name of the network interface to use
    interface: ens192
    # CNI network plugin
    cniName: calico
    podCIDR: 100.64.0.0/10
    svcCIDR: 10.96.0.0/22
    withoutCNI: false
  certSANS:
    - aliyun-inc.com
    - 10.0.0.2
  hosts:
  - ips:
    - 10.1.90.140
    - 10.1.90.141
    roles:
    - master
    ssh:
      port: "22"
  - ips:
    - 10.1.90.142
    - 10.1.90.143
    - 10.1.90.144
    - 10.1.90.145
    roles:
    - node
    ssh:
      port: "22"
[root@portal139 sealer]#
  3. Run sealer to install the cluster
[root@portal139 sealer]# sealer apply -f Clusterfile
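    • Optionally, the cluster image can be pre-pulled before applying, and a half-finished run can be torn down before re-applying; sealer pull, sealer images and sealer delete exist in the sealer CLI, but treat the exact flags as assumptions for your sealer version.
# pre-pull the cluster image and confirm it is present locally (flags assumed for this sealer version)
[root@portal139 sealer]# sealer pull dev.hb.hatech.com.cn/library/kubernetes:v1.19.9
[root@portal139 sealer]# sealer images
# if a previous apply failed part-way, tear the cluster down before re-applying (assumed syntax)
[root@portal139 sealer]# sealer delete -f Clusterfile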
  4. Watch the installation progress
    • master [10.1.90.140 10.1.90.141] are the master (control-plane) nodes
    • worker [10.1.90.142 10.1.90.143 10.1.90.144 10.1.90.145] are the worker nodes
2022-06-22 11:55:17 [INFO] [local.go:288] Start to create a new cluster: master [10.1.90.140 10.1.90.141], worker [10.1.90.142 10.1.90.143 10.1.90.144 10.1.90.145]
copying files to 10.1.90.144: 32/32
copying files to 10.1.90.140: 284/284
copying files to 10.1.90.143: 32/32
copying files to 10.1.90.142: 32/32
copying files to 10.1.90.145: 32/32
copying files to 10.1.90.141: 32/32
copying files to 10.1.90.140: 310/310
ernetes.default.svc kubernetes.default.svc.cluster.local:kubernetes.default.svc.cluster.local localhost:localhost master140.sealer.hatech.com.cn:master140.sealer.hatech.com.cn] map[10.1.90.140:10.1.90.140 10.1.90.141:10.1.90.141 10.103.97.2:10.103.97.2 10.96.0.1:10.96.0.1 127.0.0.1:127.0.0.1 172.16.0.181:172.16.0.181]}
2022-06-22 11:59:01 [INFO] [kube_certs.go:254] Etcd altnames : {map[localhost:localhost master140.sealer.hatech.com.cn:master140.sealer.hatech.com.cn] map[10.1.90.140:10.1.90.140 127.0.0.1:127.0.0.1 ::1:::1]}, commonName : master140.sealer.hatech.com.cn
2022-06-22 11:59:18 [INFO] [kubeconfig.go:267] [kubeconfig] Writing "admin.conf" kubeconfig file

2022-06-22 11:59:19 [INFO] [kubeconfig.go:267] [kubeconfig] Writing "controller-manager.conf" kubeconfig file

2022-06-22 11:59:19 [INFO] [kubeconfig.go:267] [kubeconfig] Writing "scheduler.conf" kubeconfig file

2022-06-22 11:59:20 [INFO] [kubeconfig.go:267] [kubeconfig] Writing "kubelet.conf" kubeconfig file

++ dirname init-registry.sh
+ cd .
+ REGISTRY_PORT=5000
+ VOLUME=/var/lib/sealer/data/hatech-cluster/rootfs/registry
+ REGISTRY_DOMAIN=sea.hub
+ container=sealer-registry
+++ pwd
++ dirname /var/lib/sealer/data/hatech-cluster/rootfs/scripts
+ rootfs=/var/lib/sealer/data/hatech-cluster/rootfs
+ config=/var/lib/sealer/data/hatech-cluster/rootfs/etc/registry_config.yml
+ htpasswd=/var/lib/sealer/data/hatech-cluster/rootfs/etc/registry_htpasswd
+ certs_dir=/var/lib/sealer/data/hatech-cluster/rootfs/certs
+ image_dir=/var/lib/sealer/data/hatech-cluster/rootfs/images
+ mkdir -p /var/lib/sealer/data/hatech-cluster/rootfs/registry
+ load_images
+ for image in '"$image_dir"/*'
+ '[' -f /var/lib/sealer/data/hatech-cluster/rootfs/images/registry.tar ']'
+ docker load -q -i /var/lib/sealer/data/hatech-cluster/rootfs/images/registry.tar
Loaded image: registry:2.7.1
++ docker ps -aq -f name=sealer-registry
+ '[' '' ']'
+ regArgs='-d --restart=always --net=host --name sealer-registry -v /var/lib/sealer/data/hatech-cluster/rootfs/certs:/certs -v /var/lib/sealer/data/hatech-cluster/rootfs/registry:/var/lib/registry -e REGISTRY_HTTP_TLS_CERTIFICATE=/certs/sea.hub.crt -e REGISTRY_HTTP_TLS_KEY=/certs/sea.hub.key'
+ '[' -f /var/lib/sealer/data/hatech-cluster/rootfs/etc/registry_config.yml ']'
+ sed -i s/5000/5000/g /var/lib/sealer/data/hatech-cluster/rootfs/etc/registry_config.yml
+ regArgs='-d --restart=always --net=host --name sealer-registry -v /var/lib/sealer/data/hatech-cluster/rootfs/certs:/certs -v /var/lib/sealer/data/hatech-cluster/rootfs/registry:/var/lib/registry -e REGISTRY_HTTP_TLS_CERTIFICATE=/certs/sea.hub.crt -e REGISTRY_HTTP_TLS_KEY=/certs/sea.hub.key     -v /var/lib/sealer/data/hatech-cluster/rootfs/etc/registry_config.yml:/etc/docker/registry/config.yml'
+ '[' -f /var/lib/sealer/data/hatech-cluster/rootfs/etc/registry_htpasswd ']'
+ docker run -d --restart=always --net=host --name sealer-registry -v /var/lib/sealer/data/hatech-cluster/rootfs/certs:/certs -v /var/lib/sealer/data/hatech-cluster/rootfs/registry:/var/lib/registry -e REGISTRY_HTTP_TLS_CERTIFICATE=/certs/sea.hub.crt -e REGISTRY_HTTP_TLS_KEY=/certs/sea.hub.key -v /var/lib/sealer/data/hatech-cluster/rootfs/etc/registry_config.yml:/etc/docker/registry/config.yml registry:2.7.1
9fcdfec5b5616611a6375668105c32b29823ef53fbf1521a263a3413a84dd39d
+ check_registry
+ n=1
copying files to 10.1.90.140: 314/314
++ docker inspect --format '{{json .State.Status}}' sealer-registry
+ registry_status='"running"'
+ [[ "running" == \"running\" ]]
+ break
2022-06-22 11:59:24 [INFO] [init.go:251] start to init master0...
2022-06-22 12:02:05 [INFO] [init.go:206] join command is: kubeadm join  apiserver.cluster.local:6443 --token s69jo6.wp72gy3gozmhkd1i \
    --discovery-token-ca-cert-hash sha256:953e1406b229f6ecea18ce7c9c3e41cc565b5ad7f053801ab5a0b559e859f664 \
    --control-plane --certificate-key cef95ea5b16b3217dac468693318a217a3b9bf86221ce5b98eeb25a7d0184443


2022-06-22 12:02:05 [INFO] [runtime.go:76] [10.1.90.141] will be added as master
2022-06-22 12:02:09 [INFO] [init.go:206] join command is: kubeadm join  apiserver.cluster.local:6443 --token milpmk.0wky04qtvxmzrfq9     --discovery-token-ca-cert-hash sha256:953e1406b229f6ecea18ce7c9c3e41cc565b5ad7f053801ab5a0b559e859f664

2022-06-22 12:02:09 [INFO] [masters.go:520] join token: milpmk.0wky04qtvxmzrfq9 hash: sha256:953e1406b229f6ecea18ce7c9c3e41cc565b5ad
copying files to 10.1.90.141: 59/59
2022-06-22 12:02:20 [INFO] [masters.go:351] Start to join 10.1.90.141 as master
2022-06-22 12:02:21 [INFO] [kube_certs.go:234] APIserver altNames :  {map[apiserver.cluster.local:apiserver.cluster.local kubernetes:kubernetes kubernetes.default:kubernetes.default kubernetes.default.svc:kubernetes.default.svc kubernetes.default.svc.cluster.local:kubernetes.default.svc.cluster.local localhost:localhost master141.sealer.hatech.com.cn:master141.sealer.hatech.com.cn] map[10.1.90.140:10.1.90.140 10.1.90.141:10.1.90.141 10.103.97.2:10.103.97.2 10.96.0.1:10.96.0.1 127.0.0.1:127.0.0.1 172.16.0.181:172.16.0.181]}
2022-06-22 12:02:21 [INFO] [kube_certs.go:254] Etcd altnames : {map[localhost:localhost master141.sealer.hatech.com.cn:master141.sealer.hatech.com.cn] map[10.1.90.141:10.1.90.141 127.0.0.1:127.0.0.1 ::1:::1]}, commonName : master141.sealer.hatech.com.cn
2022-06-22 12:02:21 [INFO] [kube_certs.go:262] sa.key sa.pub already exist
[preflight] Running pre-flight checks
        [WARNING IsDockerSystemdCheck]: detected "cgroupfs" as the Docker cgroup driver. The recommended driver is "systemd". Please follow the guide at https://kubernetes.io/docs/setup/cri/
        [WARNING FileExisting-socat]: socat not found in system path
[preflight] Reading configuration from the cluster...
[preflight] FYI: You can look at this config file with 'kubectl -n kube-system get cm kubeadm-config -oyaml'
[preflight] Running pre-flight checks before initializing the new control plane instance
[preflight] Pulling images required for setting up a Kubernetes cluster
[preflight] This might take a minute or two, depending on the speed of your internet connection
[preflight] You can also perform this action in beforehand using 'kubeadm config images pull'
[certs] Using certificateDir folder "/etc/kubernetes/pki"
[certs] Using the existing "apiserver-etcd-client" certificate and key
[certs] Using the existing "etcd/healthcheck-client" certificate and key
[certs] Using the existing "etcd/server" certificate and key
[certs] Using the existing "etcd/peer" certificate and key
[certs] Using the existing "apiserver" certificate and key
[certs] Using the existing "apiserver-kubelet-client" certificate and key
[certs] Using the existing "front-proxy-client" certificate and key
[certs] Valid certificates and keys now exist in "/etc/kubernetes/pki"
[certs] Using the existing "sa" key
[kubeconfig] Generating kubeconfig files
[kubeconfig] Using kubeconfig folder "/etc/kubernetes"
[kubeconfig] Using existing kubeconfig file: "/etc/kubernetes/admin.conf"
W0622 12:03:42.453726    3902 kubeconfig.go:242] a kubeconfig file "/etc/kubernetes/controller-manager.conf" exists already but has an unexpected API Server URL: expected: https://10.1.90.141:6443, got: https://apiserver.cluster.local:6443
[kubeconfig] Using existing kubeconfig file: "/etc/kubernetes/controller-manager.conf"
W0622 12:03:42.888048    3902 kubeconfig.go:242] a kubeconfig file "/etc/kubernetes/scheduler.conf" exists already but has an unexpected API Server URL: expected: https://10.1.90.141:6443, got: https://apiserver.cluster.local:6443
[kubeconfig] Using existing kubeconfig file: "/etc/kubernetes/scheduler.conf"
[control-plane] Using manifest folder "/etc/kubernetes/manifests"
[control-plane] Creating static Pod manifest for "kube-apiserver"
[control-plane] Creating static Pod manifest for "kube-controller-manager"
[control-plane] Creating static Pod manifest for "kube-scheduler"
[check-etcd] Checking that the etcd cluster is healthy
[kubelet-start] Writing kubelet configuration to file "/var/lib/kubelet/config.yaml"
[kubelet-start] Writing kubelet environment file with flags to file "/var/lib/kubelet/kubeadm-flags.env"
[kubelet-start] Starting the kubelet
[kubelet-start] Waiting for the kubelet to perform the TLS Bootstrap...
[etcd] Announced new etcd member joining to the existing etcd cluster
[etcd] Creating static Pod manifest for "etcd"
[etcd] Waiting for the new etcd member to join the cluster. This can take up to 40s
[upload-config] Storing the configuration used in ConfigMap "kubeadm-config" in the "kube-system" Namespace
[mark-control-plane] Marking the node master141.sealer.hatech.com.cn as control-plane by adding the label "node-role.kubernetes.io/master=''"
[mark-control-plane] Marking the node master141.sealer.hatech.com.cn as control-plane by adding the taints [node-role.kubernetes.io/master:NoSchedule]

This node has joined the cluster and a new control plane instance was created:

* Certificate signing request was sent to apiserver and approval was received.
* The Kubelet was informed of the new secure connection details.
* Control plane (master) label and taint were applied to the new node.
* The Kubernetes control plane instances scaled up.
* A new etcd member was added to the local/stacked etcd cluster.

To start administering your cluster from this node, you need to run the following as a regular user:

        mkdir -p $HOME/.kube
        sudo cp -i /etc/kubernetes/admin.conf $HOME/.kube/config
copying files to 10.1.90.144: 34/34

copying files to 10.1.90.143: 34/34
copying files to 10.1.90.142: 34/34
copying files to 10.1.90.145: 34/34
2022-06-22 12:04:04 [INFO] [runtime.go:83] [10.1.90.142 10.1.90.143 10.1.90.144 10.1.90.145] will be added as worker
2022-06-22 12:04:16 [INFO] [init.go:206] join command is: kubeadm join  apiserver.cluster.local:6443 --token 7kkyxj.epewlhup5bs5rjh6     --discovery-token-ca-cert-hash sha256:953e1406b229f6ecea18ce7c9c3e41cc565b5ad7f053801ab5a0b559e859f664

2022-06-22 12:04:16 [INFO] [masters.go:520] join token: 7kkyxj.epewlhup5bs5rjh6 hash: sha256:953e1406b229f6ecea18ce7c9c3e41cc565b5ad7f053801ab5a0b559e859f664 certifacate key:
2022-06-22 12:04:16 [INFO] [nodes.go:92] Start to join 10.1.90.145 as worker
2022-06-22 12:04:16 [INFO] [nodes.go:92] Start to join 10.1.90.143 as worker
2022-06-22 12:04:16 [INFO] [nodes.go:92] Start to join 10.1.90.144 as worker
2022-06-22 12:04:16 [INFO] [nodes.go:92] Start to join 10.1.90.142 as worker
2022-06-22 12:04:17 [DEBG] [sealer/vendor/github.com/sealyun/lvscare/care/care.go:16] VsAndRsCare DeleteVirtualServer
2022-06-22 12:04:17 [DEBG] [sealer/vendor/github.com/sealyun/lvscare/utils/utils.go:21] SplitServer debug: IP: 10.103.97.2, Port: 6443
2022-06-22 12:04:17 [DEBG] [sealer/vendor/github.com/sealyun/lvscare/utils/utils.go:21] SplitServer debug: IP: 10.103.97.2, Port: 6443
2022-06-22 12:04:17 [WARN] [sealer/vendor/github.com/sealyun/lvscare/service/service.go:75] DeleteVirtualServer error:  no such process
2022-06-22 12:04:17 [WARN] [sealer/vendor/github.com/sealyun/lvscare/care/care.go:19] VsAndRsCare DeleteVirtualServer: no such process
2022-06-22 12:04:17 [DEBG] [sealer/vendor/github.com/sealyun/lvscare/service/service.go:129] IsVirtualServerAvailable warn: virtual server is empty.
2022-06-22 12:04:17 [DEBG] [sealer/vendor/github.com/sealyun/lvscare/utils/utils.go:21] SplitServer debug: IP: 10.103.97.2, Port: 6443
2022-06-22 12:04:17 [DEBG] [sealer/vendor/github.com/sealyun/lvscare/utils/utils.go:21] SplitServer debug: IP: 10.103.97.2, Port: 6443
2022-06-22 12:04:17 [DEBG] [sealer/vendor/github.com/sealyun/lvscare/utils/utils.go:21] SplitServer debug: IP: 10.1.90.140, Port: 6443
2022-06-22 12:04:17 [DEBG] [sealer/vendor/github.com/sealyun/lvscare/utils/utils.go:21] SplitServer debug: IP: 10.1.90.140, Port: 6443
2022-06-22 12:04:17 [DEBG] [sealer/vendor/github.com/sealyun/lvscare/utils/utils.go:21] SplitServer debug: IP: 10.103.97.2, Port: 6443
2022-06-22 12:04:17 [DEBG] [sealer/vendor/github.com/sealyun/lvscare/utils/utils.go:21] SplitServer debug: IP: 10.1.90.141, Port: 6443
2022-06-22 12:04:17 [DEBG] [sealer/vendor/github.com/sealyun/lvscare/utils/utils.go:21] SplitServer debug: IP: 10.1.90.141, Port: 6443
2022-06-22 12:04:17 [DEBG] [sealer/vendor/github.com/sealyun/lvscare/utils/utils.go:21] SplitServer debug: IP: 10.103.97.2, Port: 6443
2022-06-22 12:04:17 [DEBG] [sealer/vendor/github.com/sealyun/lvscare/care/care.go:16] VsAndRsCare DeleteVirtualServer
2022-06-22 12:04:17 [DEBG] [sealer/vendor/github.com/sealyun/lvscare/utils/utils.go:21] SplitServer debug: IP: 10.103.97.2, Port: 6443
2022-06-22 12:04:17 [DEBG] [sealer/vendor/github.com/sealyun/lvscare/utils/utils.go:21] SplitServer debug: IP: 10.103.97.2, Port: 6443
2022-06-22 12:04:17 [WARN] [sealer/vendor/github.com/sealyun/lvscare/service/service.go:75] DeleteVirtualServer error:  no such process
2022-06-22 12:04:17 [WARN] [sealer/vendor/github.com/sealyun/lvscare/care/care.go:19] VsAndRsCare DeleteVirtualServer: no such process
2022-06-22 12:04:17 [DEBG] [sealer/vendor/github.com/sealyun/lvscare/service/service.go:129] IsVirtualServerAvailable warn: virtual server is empty.
2022-06-22 12:04:17 [DEBG] [sealer/vendor/github.com/sealyun/lvscare/utils/utils.go:21] SplitServer debug: IP: 10.103.97.2, Port: 6443
2022-06-22 12:04:17 [DEBG] [sealer/vendor/github.com/sealyun/lvscare/utils/utils.go:21] SplitServer debug: IP: 10.103.97.2, Port: 6443
2022-06-22 12:04:17 [DEBG] [sealer/vendor/github.com/sealyun/lvscare/utils/utils.go:21] SplitServer debug: IP: 10.1.90.140, Port: 6443
2022-06-22 12:04:17 [DEBG] [sealer/vendor/github.com/sealyun/lvscare/utils/utils.go:21] SplitServer debug: IP: 10.1.90.140, Port: 6443
2022-06-22 12:04:17 [DEBG] [sealer/vendor/github.com/sealyun/lvscare/utils/utils.go:21] SplitServer debug: IP: 10.103.97.2, Port: 6443
2022-06-22 12:04:17 [DEBG] [sealer/vendor/github.com/sealyun/lvscare/utils/utils.go:21] SplitServer debug: IP: 10.1.90.141, Port: 6443
2022-06-22 12:04:17 [DEBG] [sealer/vendor/github.com/sealyun/lvscare/utils/utils.go:21] SplitServer debug: IP: 10.1.90.141, Port: 6443
2022-06-22 12:04:17 [DEBG] [sealer/vendor/github.com/sealyun/lvscare/utils/utils.go:21] SplitServer debug: IP: 10.103.97.2, Port: 6443
2022-06-22 12:04:17 [DEBG] [sealer/vendor/github.com/sealyun/lvscare/care/care.go:16] VsAndRsCare DeleteVirtualServer
2022-06-22 12:04:17 [DEBG] [sealer/vendor/github.com/sealyun/lvscare/utils/utils.go:21] SplitServer debug: IP: 10.103.97.2, Port: 6443
2022-06-22 12:04:17 [DEBG] [sealer/vendor/github.com/sealyun/lvscare/utils/utils.go:21] SplitServer debug: IP: 10.103.97.2, Port: 6443
2022-06-22 12:04:17 [WARN] [sealer/vendor/github.com/sealyun/lvscare/service/service.go:75] DeleteVirtualServer error:  no such process
2022-06-22 12:04:17 [WARN] [sealer/vendor/github.com/sealyun/lvscare/care/care.go:19] VsAndRsCare DeleteVirtualServer: no such process
2022-06-22 12:04:17 [DEBG] [sealer/vendor/github.com/sealyun/lvscare/service/service.go:129] IsVirtualServerAvailable warn: virtual server is empty.
2022-06-22 12:04:17 [DEBG] [sealer/vendor/github.com/sealyun/lvscare/utils/utils.go:21] SplitServer debug: IP: 10.103.97.2, Port: 6443
2022-06-22 12:04:17 [DEBG] [sealer/vendor/github.com/sealyun/lvscare/utils/utils.go:21] SplitServer debug: IP: 10.103.97.2, Port: 6443
2022-06-22 12:04:17 [DEBG] [sealer/vendor/github.com/sealyun/lvscare/utils/utils.go:21] SplitServer debug: IP: 10.1.90.140, Port: 6443
2022-06-22 12:04:17 [DEBG] [sealer/vendor/github.com/sealyun/lvscare/utils/utils.go:21] SplitServer debug: IP: 10.1.90.140, Port: 6443
2022-06-22 12:04:17 [DEBG] [sealer/vendor/github.com/sealyun/lvscare/utils/utils.go:21] SplitServer debug: IP: 10.103.97.2, Port: 6443
2022-06-22 12:04:17 [DEBG] [sealer/vendor/github.com/sealyun/lvscare/utils/utils.go:21] SplitServer debug: IP: 10.1.90.141, Port: 6443
2022-06-22 12:04:17 [DEBG] [sealer/vendor/github.com/sealyun/lvscare/utils/utils.go:21] SplitServer debug: IP: 10.1.90.141, Port: 6443
2022-06-22 12:04:17 [DEBG] [sealer/vendor/github.com/sealyun/lvscare/utils/utils.go:21] SplitServer debug: IP: 10.103.97.2, Port: 6443
2022-06-22 12:04:17 [DEBG] [sealer/vendor/github.com/sealyun/lvscare/care/care.go:16] VsAndRsCare DeleteVirtualServer
2022-06-22 12:04:17 [DEBG] [sealer/vendor/github.com/sealyun/lvscare/utils/utils.go:21] SplitServer debug: IP: 10.103.97.2, Port: 6443
2022-06-22 12:04:17 [DEBG] [sealer/vendor/github.com/sealyun/lvscare/utils/utils.go:21] SplitServer debug: IP: 10.103.97.2, Port: 6443
2022-06-22 12:04:17 [WARN] [sealer/vendor/github.com/sealyun/lvscare/service/service.go:75] DeleteVirtualServer error:  no such process
2022-06-22 12:04:17 [WARN] [sealer/vendor/github.com/sealyun/lvscare/care/care.go:19] VsAndRsCare DeleteVirtualServer: no such process
2022-06-22 12:04:17 [DEBG] [sealer/vendor/github.com/sealyun/lvscare/service/service.go:129] IsVirtualServerAvailable warn: virtual server is empty.
2022-06-22 12:04:17 [DEBG] [sealer/vendor/github.com/sealyun/lvscare/utils/utils.go:21] SplitServer debug: IP: 10.103.97.2, Port: 6443
2022-06-22 12:04:17 [DEBG] [sealer/vendor/github.com/sealyun/lvscare/utils/utils.go:21] SplitServer debug: IP: 10.103.97.2, Port: 6443
2022-06-22 12:04:17 [DEBG] [sealer/vendor/github.com/sealyun/lvscare/utils/utils.go:21] SplitServer debug: IP: 10.1.90.140, Port: 6443
2022-06-22 12:04:17 [DEBG] [sealer/vendor/github.com/sealyun/lvscare/utils/utils.go:21] SplitServer debug: IP: 10.1.90.140, Port: 6443
2022-06-22 12:04:17 [DEBG] [sealer/vendor/github.com/sealyun/lvscare/utils/utils.go:21] SplitServer debug: IP: 10.103.97.2, Port: 6443
2022-06-22 12:04:17 [DEBG] [sealer/vendor/github.com/sealyun/lvscare/utils/utils.go:21] SplitServer debug: IP: 10.1.90.141, Port: 6443
2022-06-22 12:04:17 [DEBG] [sealer/vendor/github.com/sealyun/lvscare/utils/utils.go:21] SplitServer debug: IP: 10.1.90.141, Port: 6443
2022-06-22 12:04:17 [DEBG] [sealer/vendor/github.com/sealyun/lvscare/utils/utils.go:21] SplitServer debug: IP: 10.103.97.2, Port: 6443
[preflight] Running pre-flight checks
[preflight] Running pre-flight checks
[preflight] Running pre-flight checks
[preflight] Running pre-flight checks
        [WARNING IsDockerSystemdCheck]: detected "cgroupfs" as the Docker cgroup driver. The recommended driver is "systemd". Please follow the guide at https://kubernetes.io/docs/setup/cri/
        [WARNING FileExisting-socat]: socat not found in system path
        [WARNING IsDockerSystemdCheck]: detected "cgroupfs" as the Docker cgroup driver. The recommended driver is "systemd". Please follow the guide at https://kubernetes.io/docs/setup/cri/
        [WARNING FileExisting-socat]: socat not found in system path
        [WARNING IsDockerSystemdCheck]: detected "cgroupfs" as the Docker cgroup driver. The recommended driver is "systemd". Please follow the guide at https://kubernetes.io/docs/setup/cri/
        [WARNING FileExisting-socat]: socat not found in system path
        [WARNING IsDockerSystemdCheck]: detected "cgroupfs" as the Docker cgroup driver. The recommended driver is "systemd". Please follow the guide at https://kubernetes.io/docs/setup/cri/
        [WARNING FileExisting-socat]: socat not found in system path
[preflight] Reading configuration from the cluster...
[preflight] FYI: You can look at this config file with 'kubectl -n kube-system get cm kubeadm-config -oyaml'
[kubelet-start] Writing kubelet configuration to file "/var/lib/kubelet/config.yaml"
[kubelet-start] Writing kubelet environment file with flags to file "/var/lib/kubelet/kubeadm-flags.env"
[kubelet-start] Starting the kubelet
[preflight] Reading configuration from the cluster...
[preflight] FYI: You can look at this config file with 'kubectl -n kube-system get cm kubeadm-config -oyaml'
[preflight] Reading configuration from the cluster...
[preflight] FYI: You can look at this config file with 'kubectl -n kube-system get cm kubeadm-config -oyaml'
[kubelet-start] Waiting for the kubelet to perform the TLS Bootstrap...
[kubelet-start] Writing kubelet configuration to file "/var/lib/kubelet/config.yaml"
[kubelet-start] Writing kubelet environment file with flags to file "/var/lib/kubelet/kubeadm-flags.env"
[kubelet-start] Starting the kubelet
[preflight] Reading configuration from the cluster...
[preflight] FYI: You can look at this config file with 'kubectl -n kube-system get cm kubeadm-config -oyaml'
[kubelet-start] Writing kubelet configuration to file "/var/lib/kubelet/config.yaml"
[kubelet-start] Writing kubelet environment file with flags to file "/var/lib/kubelet/kubeadm-flags.env"
[kubelet-start] Starting the kubelet
[kubelet-start] Writing kubelet configuration to file "/var/lib/kubelet/config.yaml"
[kubelet-start] Writing kubelet environment file with flags to file "/var/lib/kubelet/kubeadm-flags.env"
[kubelet-start] Starting the kubelet
[kubelet-start] Waiting for the kubelet to perform the TLS Bootstrap...
[kubelet-start] Waiting for the kubelet to perform the TLS Bootstrap...
[kubelet-start] Waiting for the kubelet to perform the TLS Bootstrap...

This node has joined the cluster:
* Certificate signing request was sent to apiserver and a response was received.
* The Kubelet was informed of the new secure connection details.

Run 'kubectl get nodes' on the control-plane to see this node join the cluster.


This node has joined the cluster:
* Certificate signing request was sent to apiserver and a response was received.
* The Kubelet was informed of the new secure connection details.

Run 'kubectl get nodes' on the control-plane to see this node join the cluster.

2022-06-22 12:04:45 [INFO] [nodes.go:115] Succeeded in joining 10.1.90.145 as worker
2022-06-22 12:04:45 [INFO] [nodes.go:115] Succeeded in joining 10.1.90.144 as worker

This node has joined the cluster:
* Certificate signing request was sent to apiserver and a response was received.
* The Kubelet was informed of the new secure connection details.

Run 'kubectl get nodes' on the control-plane to see this node join the cluster.

2022-06-22 12:04:46 [INFO] [nodes.go:115] Succeeded in joining 10.1.90.143 as worker

This node has joined the cluster:
* Certificate signing request was sent to apiserver and a response was received.
* The Kubelet was informed of the new secure connection details.

Run 'kubectl get nodes' on the control-plane to see this node join the cluster.

2022-06-22 12:04:46 [INFO] [nodes.go:115] Succeeded in joining 10.1.90.142 as worker
customresourcedefinition.apiextensions.k8s.io/bgpconfigurations.crd.projectcalico.org created
customresourcedefinition.apiextensions.k8s.io/bgppeers.crd.projectcalico.org created
customresourcedefinition.apiextensions.k8s.io/blockaffinities.crd.projectcalico.org created
customresourcedefinition.apiextensions.k8s.io/caliconodestatuses.crd.projectcalico.org created
customresourcedefinition.apiextensions.k8s.io/clusterinformations.crd.projectcalico.org created
customresourcedefinition.apiextensions.k8s.io/felixconfigurations.crd.projectcalico.org created
customresourcedefinition.apiextensions.k8s.io/globalnetworkpolicies.crd.projectcalico.org created
customresourcedefinition.apiextensions.k8s.io/globalnetworksets.crd.projectcalico.org created
customresourcedefinition.apiextensions.k8s.io/hostendpoints.crd.projectcalico.org created
customresourcedefinition.apiextensions.k8s.io/ipamblocks.crd.projectcalico.org created
customresourcedefinition.apiextensions.k8s.io/ipamconfigs.crd.projectcalico.org created
customresourcedefinition.apiextensions.k8s.io/ipamhandles.crd.projectcalico.org created
customresourcedefinition.apiextensions.k8s.io/ippools.crd.projectcalico.org created
customresourcedefinition.apiextensions.k8s.io/ipreservations.crd.projectcalico.org created
customresourcedefinition.apiextensions.k8s.io/kubecontrollersconfigurations.crd.projectcalico.org created
customresourcedefinition.apiextensions.k8s.io/networkpolicies.crd.projectcalico.org created
customresourcedefinition.apiextensions.k8s.io/networksets.crd.projectcalico.org created
customresourcedefinition.apiextensions.k8s.io/apiservers.operator.tigera.io created
customresourcedefinition.apiextensions.k8s.io/imagesets.operator.tigera.io created
customresourcedefinition.apiextensions.k8s.io/installations.operator.tigera.io created
customresourcedefinition.apiextensions.k8s.io/tigerastatuses.operator.tigera.io created
namespace/tigera-operator created
podsecuritypolicy.policy/tigera-operator created
serviceaccount/tigera-operator created
clusterrole.rbac.authorization.k8s.io/tigera-operator created
clusterrolebinding.rbac.authorization.k8s.io/tigera-operator created
deployment.apps/tigera-operator created
installation.operator.tigera.io/default created
apiserver.operator.tigera.io/default created
2022-06-22 12:04:50 [INFO] [local.go:298] Succeeded in creating a new cluster, enjoy it!
[root@portal139 sealer]#
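    • If you also want to run kubectl from portal139 instead of a master, one option is to copy the admin kubeconfig from master140; this is only a sketch and assumes root SSH access and a kubectl binary already installed on portal139.
# copy the admin kubeconfig from master140 to portal139 (sketch; assumes root SSH access and kubectl installed locally)
[root@portal139 ~]# mkdir -p $HOME/.kube
[root@portal139 ~]# scp root@10.1.90.140:/etc/kubernetes/admin.conf $HOME/.kube/config
[root@portal139 ~]# kubectl get nodes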
  5. During the installation, the following warning is reported: "[WARNING IsDockerSystemdCheck]: detected "cgroupfs" as the Docker cgroup driver. The recommended driver is "systemd". Please follow the guide at https://kubernetes.io/docs/setup/cri/". It tells you to replace cgroupfs with systemd, which is done as follows.

    • Stop the kubelet service (run on every machine):

      [root@master140 ~]# systemctl stop kubelet
      
    • Edit /var/lib/kubelet/config.yaml: find cgroupDriver: cgroupfs and change it to cgroupDriver: systemd (run on every machine):

    # sudo sed -i 's/cgroupDriver: cgroupfs/cgroupDriver: systemd/' /var/lib/kubelet/config.yaml
    [root@master141 ~]# vim /var/lib/kubelet/config.yaml
    
    apiVersion: kubelet.config.k8s.io/v1beta1
    authentication:
      anonymous:
        enabled: false
      webhook:
        cacheTTL: 2m0s
        enabled: true
      x509:
        clientCAFile: /etc/kubernetes/pki/ca.crt
    authorization:
      mode: Webhook
      webhook:
        cacheAuthorizedTTL: 5m0s
        cacheUnauthorizedTTL: 30s
    # changed to: systemd
    cgroupDriver: systemd
    cgroupsPerQOS: true
    clusterDNS:
    - 10.96.0.10
    clusterDomain: cluster.local
    configMapAndSecretChangeDetectionStrategy: Watch
    containerLogMaxFiles: 5
    containerLogMaxSize: 10Mi
    contentType: application/vnd.kubernetes.protobuf
    cpuCFSQuota: true
    cpuCFSQuotaPeriod: 100ms
    cpuManagerPolicy: none
    cpuManagerReconcilePeriod: 10s
    enableControllerAttachDetach: true
    enableDebuggingHandlers: true
    enforceNodeAllocatable:
    ........................................
    
  • Edit daemon.json; the original file looks like this:
[root@master140 ~]# cat /etc/docker/daemon.json
{
  "max-concurrent-downloads": 20,
  "log-driver": "json-file",
  "log-level": "warn",
  "log-opts": {
    "max-size": "10m",
    "max-file": "3"
  },
  "mirror-registries": [
   {
    "domain": "*",
    "mirrors": ["https://sea.hub:5000"]
   }
  ],
  "data-root": "/var/lib/docker"
}
[root@master140 ~]#
  • The modified daemon.json (the same change is applied on every machine):
[root@master140 ~]# cat /etc/docker/daemon.json
{
  "max-concurrent-downloads": 20,
  "log-driver": "json-file",
  "log-level": "warn",
  "log-opts": {
    "max-size": "10m",
    "max-file": "3"
  },
  "mirror-registries": [
   {
    "domain": "*",
    "mirrors": ["https://sea.hub:5000"]
   }
  ],
  "data-root": "/var/lib/docker",
  # the two lines below were added (annotation only; JSON itself does not allow comments)
  "insecure-registries":["https://dev.hb.hatech.com.cn"],
  "exec-opts": ["native.cgroupdriver=systemd"]
}
[root@master140 ~]#
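    • Because JSON does not allow comments, make sure the annotation line above is not present in the actual /etc/docker/daemon.json; a quick syntax check before restarting Docker can be done with Python's built-in json.tool (any JSON validator works).
# validate daemon.json; anything other than pretty-printed JSON on stdout indicates a syntax error
[root@master140 ~]# python -m json.tool /etc/docker/daemon.json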
  6. Restart docker and kubelet (run on every machine)
[root@master140 ~]# systemctl daemon-reload && systemctl restart docker && systemctl start kubelet
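    • To avoid repeating the edits and restarts by hand on all six nodes, the whole fix can also be pushed from portal139 in one loop; this is only a sketch, it assumes sshpass and the '<password>' placeholder, and it assumes /etc/docker/daemon.json has already been updated on every node.
# apply the cgroup-driver change and restart the services on every node from portal139 (sketch only)
[root@portal139 ~]# for ip in 10.1.90.14{0..5}; do \
    sshpass -p '<password>' ssh -o StrictHostKeyChecking=no root@$ip \
    "sed -i 's/cgroupDriver: cgroupfs/cgroupDriver: systemd/' /var/lib/kubelet/config.yaml; systemctl daemon-reload; systemctl restart docker; systemctl restart kubelet"; \
  done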
  7. Check whether the cgroup driver is now systemd
[root@master140 ~]# docker info | grep Cgroup
 Cgroup Driver: systemd
[root@master140 ~]#
  8. Log in to the image registry (run on every machine)
[root@master140 ~]# docker login dev.hb.hatech.com.cn
Username: admin
Password:
WARNING! Your password will be stored unencrypted in /root/.docker/config.json.
Configure a credential helper to remove this warning. See
https://docs.docker.com/engine/reference/commandline/login/#credentials-store

Login Succeeded
[root@master140 ~]#
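    • If workloads in the cluster will pull images from dev.hb.hatech.com.cn, you may also want a Kubernetes image pull secret so pods can authenticate; this step is optional, and the secret name, namespace, and '<password>' below are placeholders.
# create an image pull secret from the registry credentials (secret name, namespace and password are placeholders)
[root@master140 ~]# kubectl create secret docker-registry hatech-registry \
    --docker-server=dev.hb.hatech.com.cn \
    --docker-username=admin \
    --docker-password='<password>' \
    -n default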
  9. Check the Kubernetes cluster information
    • You can see that the two-master, four-worker Kubernetes cluster has been created
# query the cluster node information
[root@master140 ~]# kubectl get nodes -o wide
NAME                             STATUS   ROLES    AGE    VERSION   INTERNAL-IP   EXTERNAL-IP   OS-IMAGE                KERNEL-VERSION                CONTAINER-RUNTIME
master140.sealer.hatech.com.cn   Ready    master   172m   v1.19.9   10.1.90.140   <none>        CentOS Linux 7 (Core)   5.4.199-1.el7.elrepo.x86_64   docker://19.3.14
master141.sealer.hatech.com.cn   Ready    master   170m   v1.19.9   10.1.90.141   <none>        CentOS Linux 7 (Core)   5.4.199-1.el7.elrepo.x86_64   docker://19.3.14
slave142.sealer.hatech.com.cn    Ready    <none>   169m   v1.19.9   10.1.90.142   <none>        CentOS Linux 7 (Core)   5.4.199-1.el7.elrepo.x86_64   docker://19.3.14
slave143.sealer.hatech.com.cn    Ready    <none>   169m   v1.19.9   10.1.90.143   <none>        CentOS Linux 7 (Core)   5.4.199-1.el7.elrepo.x86_64   docker://19.3.14
slave144.sealer.hatech.com.cn    Ready    <none>   169m   v1.19.9   10.1.90.144   <none>        CentOS Linux 7 (Core)   5.4.199-1.el7.elrepo.x86_64   docker://19.3.14
slave145.sealer.hatech.com.cn    Ready    <none>   169m   v1.19.9   10.1.90.145   <none>        CentOS Linux 7 (Core)   5.4.199-1.el7.elrepo.x86_64   docker://19.3.14
[root@master140 ~]#

# query the cluster namespaces
[root@master140 ~]# kubectl get ns
NAME               STATUS   AGE
calico-apiserver   Active   167m
calico-system      Active   168m
default            Active   171m
kube-node-lease    Active   171m
kube-public        Active   171m
kube-system        Active   171m
tigera-operator    Active   168m

# query the pods in the kube-system namespace
[root@master140 ~]# kubectl -n kube-system get pod
NAME                                                     READY   STATUS    RESTARTS   AGE
coredns-55bcc669d7-6qbpc                                 1/1     Running   0          171m
coredns-55bcc669d7-84fwg                                 1/1     Running   0          171m
etcd-master140.sealer.hatech.com.cn                      1/1     Running   2          171m
etcd-master141.sealer.hatech.com.cn                      1/1     Running   1          170m
kube-apiserver-master140.sealer.hatech.com.cn            1/1     Running   2          171m
kube-apiserver-master141.sealer.hatech.com.cn            1/1     Running   3          170m
kube-controller-manager-master140.sealer.hatech.com.cn   1/1     Running   3          171m
kube-controller-manager-master141.sealer.hatech.com.cn   1/1     Running   1          170m
kube-lvscare-slave142.sealer.hatech.com.cn               1/1     Running   0          168m
kube-lvscare-slave143.sealer.hatech.com.cn               1/1     Running   0          168m
kube-lvscare-slave144.sealer.hatech.com.cn               1/1     Running   0          169m
kube-lvscare-slave145.sealer.hatech.com.cn               1/1     Running   0          169m
kube-proxy-4qwm9                                         1/1     Running   0          169m
kube-proxy-976lz                                         1/1     Running   2          171m
kube-proxy-dlzht                                         1/1     Running   0          170m
kube-proxy-qxxsv                                         1/1     Running   0          169m
kube-proxy-rjpz8                                         1/1     Running   0          169m
kube-proxy-v9d7s                                         1/1     Running   0          169m
kube-scheduler-master140.sealer.hatech.com.cn            1/1     Running   3          171m
kube-scheduler-master141.sealer.hatech.com.cn            1/1     Running   1          170m
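    • As a final sanity check, the Calico components installed by the tigera-operator can be inspected as well; the namespaces match the kubectl get ns output above.
# confirm that the Calico control plane and node agents are running
[root@master140 ~]# kubectl -n tigera-operator get pods
[root@master140 ~]# kubectl -n calico-system get pods -o wide
[root@master140 ~]# kubectl cluster-info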