8. Deploying a Highly Available Cluster with Kubernetes v1.25 (Binaries) and Docker (Part 3)


9. Installing Calico

docs.projectcalico.org/maintenance…

Calico installation: docs.projectcalico.org/getting-sta…

[root@k8s-master01 ~]# curl https://docs.projectcalico.org/manifests/calico-etcd.yaml -O

Modify the following locations in calico-etcd.yaml:

[root@k8s-master01 ~]# grep "etcd_endpoints:.*" calico-etcd.yaml 
  etcd_endpoints: "http://<ETCD_IP>:<ETCD_PORT>"
​
[root@k8s-master01 ~]# sed -i 's#etcd_endpoints: "http://<ETCD_IP>:<ETCD_PORT>"#etcd_endpoints: "https://172.31.3.108:2379,https://172.31.3.109:2379,https://172.31.3.110:2379"#g' calico-etcd.yaml
​
[root@k8s-master01 ~]# grep "etcd_endpoints:.*" calico-etcd.yaml
  etcd_endpoints: "https://172.31.3.108:2379,https://172.31.3.109:2379,https://172.31.3.110:2379"
​
[root@k8s-master01 ~]# grep -E "(.*etcd-key:.*|.*etcd-cert:.*|.*etcd-ca:.*)" calico-etcd.yaml
  # etcd-key: null
  # etcd-cert: null
  # etcd-ca: null
​
[root@k8s-master01 ~]# ETCD_KEY=`cat /etc/kubernetes/pki/etcd/etcd-key.pem | base64 | tr -d '\n'`
[root@k8s-master01 ~]# ETCD_CERT=`cat /etc/kubernetes/pki/etcd/etcd.pem | base64 | tr -d '\n'`
[root@k8s-master01 ~]# ETCD_CA=`cat /etc/kubernetes/pki/etcd/etcd-ca.pem | base64 | tr -d '\n'`
​
[root@k8s-master01 ~]# sed -i "s@# etcd-key: null@etcd-key: ${ETCD_KEY}@g; s@# etcd-cert: null@etcd-cert: ${ETCD_CERT}@g; s@# etcd-ca: null@etcd-ca: ${ETCD_CA}@g" calico-etcd.yaml
​
[root@k8s-master01 ~]# grep -E "(.*etcd-key:.*|.*etcd-cert:.*|.*etcd-ca:.*)" calico-etcd.yaml
  etcd-key: LS0tLS1CRUdJTiBSU0EgUFJJVkFURSBLRVktLS0tLQpNSUlFcEFJQkFBS0NBUUVBdldFU2FVU2ZXVms1WXEvSVlqREtlNHdFUFVMMFlBaUJjWHRDUzlWWDJRbzFvUm5WCnRLMFQvbm02Mnl2NDh0Q0R6amoxOElyR2lyZXk1TVBaYzhJYnJmK3VWZFBtSzJPNXh2bFZSam9Xb0FwNkFlMWQKMU9YLzBlQjkreFJSbTlXNVM3Y3RDNTJCcmRLZTQrTnlnejBlWjFGdnU5dWFvWXI3djJ3V3lkTnlSc0FJNzFBdApWYUhMeWZrdU9OaWVtUCs5WmdOSHdxdVhYVHZQZDk5cTRtSFM3NGRuTTdRNGs5ekoyWitmaEtIcTdUY1RwalV4CmR0Rk5DeEo0TUYvcTdsbVVnMzVvRjY4UzBmL2F2c0xPK0dQa3p3QWxTREUycWovRy9ISWIzVnVjWHRtL1ZOSGsKTERMMmMrVE55cEwzSmFaOFdkMlRvTm9QbGYwV2VmYXFxYVZCSHdJREFRQUJBb0lCQUVoay9HSExmdExzSGRqUQo4OXc5WkpvNEcwTEQvcUZiZndnL28wZFVCaUlOR2hVOHlBb3FnOU1xKzdoZkplckV0UzQzYmlVSEdBRGRXb0RECk1iUmp4cXNNYTd1WnBvcDlzLzRSUUh6NUpiOEdZaitzV3N0UkpTczVFMC9SSTZWYjNWckphbW5mU3p5RW9JTGoKWnRvdW1obks4dGl3aVhzcG96VTQvN2tsb0xEVWZmMlVDTGpFb2swTXVRV3VRa3pNbkF0eTJ2SENGcVpaUi9CRApLYWorOWMxb3NnelZLK0lnaHE0b2pWM0pUbVdNSUxJcWx4d3A4blRCTnRYSXpkL001cURTSHI5dWozeXRGUVd6ClBCMGh0V2hGZU5FL09Bck10NDJrZGNJK29ub3p0UVNCUDZtbWpJdEEyY1BYbGczNjhVSHNDYm0wNjJuVDQxbzQKdUIwZUpiRUNnWUVBMnpSL3c5SFFKSUFVdFdQTWlLSnJybzVWb0NyZU11U3hHS0I4amhXbTZycWF6a29yaEcyZQpvVThqMC85alVqU005enBsa2NQblRTbXkvMS8ybSt4cTA4RlJ6YUsyb2dQNEFzVENkakdFbmNLSllsc0tORjR5CnJjWEIxZkhORldsVGNMYStEMlRYR1BGeExtOHBjclBUWGFqSHk3a1VqZGV4UU5DV0lpNmpWZWtDZ1lFQTNTcnIKVXY3bnlTS1AxVEVQZW4yWUd5VVFGWG1HYVdiZ0E1TlV2ZFlud1QyTnViK2k5Ui92RktPTCtibHUwME9CZ3RZbQpmTEdBMjFKN3F5T3h5bkJhc2ZIYkJra1BJdXNRdWZwT0pxN01zZkZpOVJkUUdBMDkwMzJBWXNvN0VRL0RjUEJQCjJocUExK3dURnZTa2pMeHpBUGZObk1RaU1qVFdDU2t5a1UrUkVjY0NnWUVBak1Bai9iZWVINkhDS2twL2pZaEUKL2x0VWJiaTQzaExzQi9VY3BSc1FCNDUwVWx0Ym4vZkhNS3FoUmNHcG82SXNsWjZBazEwb0N1cm9qWU92MUtTMApYMjIxT2dYdG9SQk5VbkhHcjUvU0dINU9Pc0pjUmczcUdweHRmeE40N1BPcGFTbGQxb0tZZnVLaEd3Q3pxM3YrCkR4aU4yYWpvb3I5SmlHV20wU0lETE9rQ2dZRUFtb1J3MHBScXBiaHkydFNpNXNZMmJjaVFHeUREU29Kb1FiYysKUGo4UG1VK0M0K0pDWEpXSitGWm01ZnFxdGdqMHc4OFFKby93NGNvWHdySjJkV1BYeVhpYXVDSjA0OFZzOHpJdwpSWkJjVEhuRDFFNE1WREYwSDMwUUpIVDduV2p3OThlOVVqaXdDYXJTajNndU9uRXl1OXRsR0lCakQ4YWdQdGc4Cmp3OFM0R01DZ1lCOE4rOXdzQzVoZGkyZ1FiVlAwUzcrVWpDK0xETHYwdEJVUEl2VTJrNDg0eGxDb3lRK1o2d2wKNzFZQXFnL3IrZzl6RzVhU3A5Vm95STVvM3RsVkwrYUZWU0x0MnB1ZHRtTUhGTGsyZ09nMGtDZzJxTW1BVHBOUgpTR3ZwdGs1LzIyQlpBZzFnR3Ewa3dRSTc4MnAvKzR4SWRnL29OZlI0VFVlSzVOakxJZEFjdXc9PQotLS0tLUVORCBSU0EgUFJJVkFURSBLRVktLS0tLQo=
  etcd-cert: LS0tLS1CRUdJTiBDRVJUSUZJQ0FURS0tLS0tCk1JSUVCVENDQXUyZ0F3SUJBZ0lVQ2YwZmQrSlVuTXY0Y2J3MUdDLzNzN09qRmkwd0RRWUpLb1pJaHZjTkFRRUwKQlFBd1p6RUxNQWtHQTFVRUJoTUNRMDR4RURBT0JnTlZCQWdUQjBKbGFXcHBibWN4RURBT0JnTlZCQWNUQjBKbAphV3BwYm1jeERUQUxCZ05WQkFvVEJHVjBZMlF4RmpBVUJnTlZCQXNURFVWMFkyUWdVMlZqZFhKcGRIa3hEVEFMCkJnTlZCQU1UQkdWMFkyUXdJQmNOTWpJd09USXpNVEF6TVRBd1doZ1BNakV5TWpBNE16QXhNRE14TURCYU1HY3gKQ3pBSkJnTlZCQVlUQWtOT01SQXdEZ1lEVlFRSUV3ZENaV2xxYVc1bk1SQXdEZ1lEVlFRSEV3ZENaV2xxYVc1bgpNUTB3Q3dZRFZRUUtFd1JsZEdOa01SWXdGQVlEVlFRTEV3MUZkR05rSUZObFkzVnlhWFI1TVEwd0N3WURWUVFECkV3UmxkR05rTUlJQklqQU5CZ2txaGtpRzl3MEJBUUVGQUFPQ0FROEFNSUlCQ2dLQ0FRRUF2V0VTYVVTZldWazUKWXEvSVlqREtlNHdFUFVMMFlBaUJjWHRDUzlWWDJRbzFvUm5WdEswVC9ubTYyeXY0OHRDRHpqajE4SXJHaXJleQo1TVBaYzhJYnJmK3VWZFBtSzJPNXh2bFZSam9Xb0FwNkFlMWQxT1gvMGVCOSt4UlJtOVc1UzdjdEM1MkJyZEtlCjQrTnlnejBlWjFGdnU5dWFvWXI3djJ3V3lkTnlSc0FJNzFBdFZhSEx5Zmt1T05pZW1QKzlaZ05Id3F1WFhUdlAKZDk5cTRtSFM3NGRuTTdRNGs5ekoyWitmaEtIcTdUY1RwalV4ZHRGTkN4SjRNRi9xN2xtVWczNW9GNjhTMGYvYQp2c0xPK0dQa3p3QWxTREUycWovRy9ISWIzVnVjWHRtL1ZOSGtMREwyYytUTnlwTDNKYVo4V2QyVG9Ob1BsZjBXCmVmYXFxYVZCSHdJREFRQUJvNEdtTUlHak1BNEdBMVVkRHdFQi93UUVBd0lGb0RBZEJnTlZIU1VFRmpBVUJnZ3IKQmdFRkJRY0RBUVlJS3dZQkJRVUhBd0l3REFZRFZSMFRBUUgvQkFJd0FEQWRCZ05WSFE0RUZnUVVLMnVTay81bgp1T1ZiZ2tsQ01XVytHalBtNWxVd1JRWURWUjBSQkQ0d1BJSUthemh6TFdWMFkyUXdNWUlLYXpoekxXVjBZMlF3Ck1vSUthemh6TFdWMFkyUXdNNGNFZndBQUFZY0VyQjhEYkljRXJCOERiWWNFckI4RGJqQU5CZ2txaGtpRzl3MEIKQVFzRkFBT0NBUUVBdE9FMmVxNU41RkgrTE5uK0NhNkZCbWdML1VkL0FodDdXblh1ekpFVFdORWxUR3VuVjdRawp1SXJpNXlOY05uUU5LNjVDVjlheGtRbGMweVQyOE9nVERKVG9zRHhWWDhhR2N6Q1FiOFNjcm55cUgvL1VERXA4Cm5yd2hUTklvVjRJR29EOG9vcGJrTCtvcW42Y0ROZkRNTW5GY0FzWlBPajJkUUh2RmN6MVNPR3ZjUmpKVnVDL1AKY1IzdldIWGlFSlNYcVVoTFFtZnpBZXl4MGszWkhxWGN3TWd6K1hVUkkwM1c4R0NOZFM0dklSSDE0TktxOW9XQwpBMTdPNk1CRUVzWkVlNW4vSXRCeDhNRGllL25VSmIrRmx6RWtZZ1dBVXBuZkIvRUVibUdUei92Sk5IdUVZbG9nCmVlY3ZNcUlOMCttajFjWlBqbWRLNXRydXg1YVp6R3lFQ3c9PQotLS0tLUVORCBDRVJUSUZJQ0FURS0tLS0tCg==
  etcd-ca: LS0tLS1CRUdJTiBDRVJUSUZJQ0FURS0tLS0tCk1JSURvRENDQW9pZ0F3SUJBZ0lVQzNWUnJTdHBzTmRJMm5ZVUFaTW9ncGdGWWpNd0RRWUpLb1pJaHZjTkFRRUwKQlFBd1p6RUxNQWtHQTFVRUJoTUNRMDR4RURBT0JnTlZCQWdUQjBKbGFXcHBibWN4RURBT0JnTlZCQWNUQjBKbAphV3BwYm1jeERUQUxCZ05WQkFvVEJHVjBZMlF4RmpBVUJnTlZCQXNURFVWMFkyUWdVMlZqZFhKcGRIa3hEVEFMCkJnTlZCQU1UQkdWMFkyUXdJQmNOTWpJd09USXpNVEF6TURBd1doZ1BNakV5TWpBNE16QXhNRE13TURCYU1HY3gKQ3pBSkJnTlZCQVlUQWtOT01SQXdEZ1lEVlFRSUV3ZENaV2xxYVc1bk1SQXdEZ1lEVlFRSEV3ZENaV2xxYVc1bgpNUTB3Q3dZRFZRUUtFd1JsZEdOa01SWXdGQVlEVlFRTEV3MUZkR05rSUZObFkzVnlhWFI1TVEwd0N3WURWUVFECkV3UmxkR05rTUlJQklqQU5CZ2txaGtpRzl3MEJBUUVGQUFPQ0FROEFNSUlCQ2dLQ0FRRUF3UWZGdGpvR2JqclkKN0dRbkpvc2pYdVp1ZUJGemVXK0laUXRHeDdoREpGQnpMcHBGVGdoNUltUmhHRUg3ZVFxdEIwaU5SZ0NseG9mRApLKzRRdWVwZjhDMTZGd3BrSi81dk1FNXRETGd6eUYzMXFKZlBUNG5DZWRjR2NzSm55NnFNSXZRNzZ5cGI1SGN4ClA2cEduZXVsU3Y4RktSMVQ1ekJRSzJZMmgrdzZqWUZYWGVoajBXVVRmbVpnYUpEMGdoM0Q0NkVPWCtQbEE5VlYKdko1NFkyMlNOWklCZndLL1dhOFI5dGlISlJsZnk0ZHVFY1h6Njl6TnhCeHIwdU1lZGxoREQvMVlqQnRseVBDaAprWi9EUC9vWHRrYlhlT0FTYlVWK1JqLzlaSnhRYUptZnN0Z1Iwc0VaTDl6QW1McndpejlwQ00yZVQ4aHdwZVRxClNjNUY5RHhReVFJREFRQUJvMEl3UURBT0JnTlZIUThCQWY4RUJBTUNBUVl3RHdZRFZSMFRBUUgvQkFVd0F3RUIKL3pBZEJnTlZIUTRFRmdRVWViMEVyTGhIajVRV2NGVHdRSXZYeFBlL0lKWXdEUVlKS29aSWh2Y05BUUVMQlFBRApnZ0VCQUFJdytybXhnSi9ZZXU0emQ5OEFmNktzRHQvbGlNMXBDMGNBTUVUSEE3VDRxL1o2TFo3LzFuTlQydVhnCmppbWF2c0xaU1BXb1lJZUtLUmRiVzFlMUxCdFBCd0svVytsdS9zdVpGZkVWTjhPbUk4ajk5eld5N1VNM0V0K1gKMjNXKzZ2a3A1QmRMemt6Z015ajNuT3pEdzNpc05penZZMElOeEc3MnlmZ0lQQ0ZUVU1aaHlGYkFOYjZrTG1ZSgpheEwyZzFTYmJCM3ZQYmNYRUVsMnA1SHM2S3ZUTkZOQXh0aU5MTkpMUTUxY2hWenZkZEM3WFVhdTZwUWhjb2krCmlvMGdNL3FocEVacEpvc3ZBeXgrSDRGNEFjWkV0YUxmUWEveWI4b2txMXBWYzF6dVhLVEVhNm1GSGhrWVgxMTMKbzBpMEhsT0lsQmRzUk84dEVsemJQWjBldW9ZPQotLS0tLUVORCBDRVJUSUZJQ0FURS0tLS0tCg==
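An optional sanity check: decode the embedded CA back out of the manifest and compare it with the file on disk; no diff output means the substitution succeeded. The awk pattern below assumes key and value ended up on a single line, as shown in the grep output above:

[root@k8s-master01 ~]# diff <(awk '/etcd-ca:/{print $2}' calico-etcd.yaml | base64 -d) /etc/kubernetes/pki/etcd/etcd-ca.pem && echo "etcd-ca matches"
etcd-ca matches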
​
[root@k8s-master01 ~]# grep -E "(.*etcd_ca:.*|.*etcd_cert:.*|.*etcd_key:.*)" calico-etcd.yaml 
  etcd_ca: ""   # "/calico-secrets/etcd-ca"
  etcd_cert: "" # "/calico-secrets/etcd-cert"
  etcd_key: ""  # "/calico-secrets/etcd-key"
​
[root@k8s-master01 ~]# sed -i 's#etcd_ca: ""#etcd_ca: "/calico-secrets/etcd-ca"#g; s#etcd_cert: ""#etcd_cert: "/calico-secrets/etcd-cert"#g; s#etcd_key: "" #etcd_key: "/calico-secrets/etcd-key" #g' calico-etcd.yaml
​
[root@k8s-master01 ~]# grep -E "(.*etcd_ca:.*|.*etcd_cert:.*|.*etcd_key:.*)" calico-etcd.yaml 
  etcd_ca: "/calico-secrets/etcd-ca"   # "/calico-secrets/etcd-ca"
  etcd_cert: "/calico-secrets/etcd-cert" # "/calico-secrets/etcd-cert"
  etcd_key: "/calico-secrets/etcd-key"  # "/calico-secrets/etcd-key"# 更改此处为自己的pod网段
[root@k8s-master01 ~]# POD_SUBNET="192.168.0.0/12"

Note: the following step changes the subnet under CALICO_IPV4POOL_CIDR in calico-etcd.yaml to your own Pod subnet, i.e. it replaces 192.168.x.x/16 with your cluster's Pod CIDR and uncomments those lines:

[root@k8s-master01 ~]# grep -E "(.*CALICO_IPV4POOL_CIDR.*|.*192.168.0.0.*)" calico-etcd.yaml 
            # - name: CALICO_IPV4POOL_CIDR
            #   value: "192.168.0.0/16"
​
[root@k8s-master01 ~]# sed -i 's@# - name: CALICO_IPV4POOL_CIDR@- name: CALICO_IPV4POOL_CIDR@g; s@#   value: "192.168.0.0/16"@  value: '"${POD_SUBNET}"'@g' calico-etcd.yaml
​
[root@k8s-master01 ~]# grep -E "(.*CALICO_IPV4POOL_CIDR.*|.*192.168.0.0.*)" calico-etcd.yaml 
            - name: CALICO_IPV4POOL_CIDR
              value: 192.168.0.0/12
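Note that 192.168.0.0/12 is not aligned on a /12 boundary: Calico appears to normalize it to the network address 192.160.0.0/12, which is why the Pod IPs later in this article fall in the 192.160.0.0-192.175.255.255 range rather than under 192.168.0.0. It is also worth confirming that the sed call kept the YAML indentation intact:

[root@k8s-master01 ~]# grep -n -B1 -A1 "CALICO_IPV4POOL_CIDR" calico-etcd.yaml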
​
[root@k8s-master01 ~]# grep "image:" calico-etcd.yaml
          image: docker.io/calico/cni:v3.24.1
          image: docker.io/calico/node:v3.24.1
          image: docker.io/calico/node:v3.24.1
          image: docker.io/calico/kube-controllers:v3.24.1

Download the Calico images and push them to Harbor

[root@k8s-master01 ~]# cat download_calico_images.sh 
#!/bin/bash
#
#**********************************************************************************************
#Author:        Raymond
#QQ:            88563128
#Date:          2022-01-11
#FileName:      download_calico_images.sh
#URL:           raymond.blog.csdn.net
#Description:   The test script
#Copyright (C): 2022 All rights reserved
#*********************************************************************************************
COLOR="echo -e \033[01;31m"
END='\033[0m'
​
images=$(awk -F "/"  '/image:/{print $NF}' calico-etcd.yaml | uniq)
HARBOR_DOMAIN=harbor.raymonds.cc
​
images_download(){
    ${COLOR}"开始下载Calico镜像"${END}
    for i in ${images};do 
        docker pull registry.cn-beijing.aliyuncs.com/raymond9/$i
        docker tag registry.cn-beijing.aliyuncs.com/raymond9/$i ${HARBOR_DOMAIN}/google_containers/$i
        docker rmi registry.cn-beijing.aliyuncs.com/raymond9/$i
        docker push ${HARBOR_DOMAIN}/google_containers/$i
    done
    ${COLOR}"Calico镜像下载完成"${END}
}
​
images_download
​
[root@k8s-master01 ~]# bash download_calico_images.sh 
​
[root@k8s-master01 ~]# docker images
REPOSITORY                                              TAG       IMAGE ID       CREATED        SIZE
harbor.raymonds.cc/google_containers/kube-controllers   v3.24.1   f9c3c1813269   3 weeks ago    71.3MB
harbor.raymonds.cc/google_containers/cni                v3.24.1   67fd9ab48451   3 weeks ago    197MB
harbor.raymonds.cc/google_containers/node               v3.24.1   75392e3500e3   3 weeks ago    223MB
​
[root@k8s-master01 ~]# sed -ri 's@(.*image:) docker.io/calico(/.*)@\1 harbor.raymonds.cc/google_containers\2@g' calico-etcd.yaml 
​
[root@k8s-master01 ~]# grep "image:" calico-etcd.yaml
          image: harbor.raymonds.cc/google_containers/cni:v3.24.1
          image: harbor.raymonds.cc/google_containers/node:v3.24.1
          image: harbor.raymonds.cc/google_containers/node:v3.24.1
          image: harbor.raymonds.cc/google_containers/kube-controllers:v3.24.1
​
[root@k8s-master01 ~]# kubectl apply -f calico-etcd.yaml
​
# Check the container status
[root@k8s-master01 ~]# kubectl get pod -n kube-system |grep calico
calico-kube-controllers-67bd695c56-vvwn9   1/1     Running   0          42s
calico-node-4drls                          1/1     Running   0          42s
calico-node-8gfq4                          1/1     Running   0          42s
calico-node-ctx8q                          1/1     Running   0          42s
calico-node-cvqxn                          1/1     Running   0          42s
calico-node-g77nk                          1/1     Running   0          42s
calico-node-nnn5l                          1/1     Running   0          42s
​
# Check the cluster status
[root@k8s-master01 ~]# kubectl get nodes
NAME           STATUS   ROLES    AGE   VERSION
172.31.3.101   Ready    <none>   43m   v1.25.2
172.31.3.102   Ready    <none>   42m   v1.25.2
172.31.3.103   Ready    <none>   42m   v1.25.2
172.31.3.111   Ready    <none>   16m   v1.25.2
172.31.3.112   Ready    <none>   16m   v1.25.2
172.31.3.113   Ready    <none>   16m   v1.25.2
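If any calico-node Pod stays in Init or CrashLoopBackOff instead, a quick triage sketch (the usual culprits are wrong etcd endpoints or a bad certificate in the Secret populated above):

[root@k8s-master01 ~]# kubectl -n kube-system describe pod -l k8s-app=calico-node | grep -A 5 Events
[root@k8s-master01 ~]# kubectl -n kube-system logs ds/calico-node -c calico-node --tail=20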

10. Installing CoreDNS

github.com/kubernetes/…

[Figure: 021.jpg]

The official page shows that the CoreDNS version for Kubernetes v1.25 is v1.9.3.

github.com/coredns/dep…

[root@k8s-master01 ~]# wget https://dl.k8s.io/v1.25.2/kubernetes.tar.gz

[root@k8s-master01 ~]# tar xf kubernetes.tar.gz

[root@k8s-master01 ~]# cp kubernetes/cluster/addons/dns/coredns/coredns.yaml.base /root/coredns.yaml

[root@k8s-master01 ~]# vim coredns.yaml
...
data:
  Corefile: |
    .:53 {
        errors
        health {
            lameduck 5s
        }
        ready
        kubernetes cluster.local in-addr.arpa ip6.arpa { # change to your Cluster DNS Domain
            pods insecure
            fallthrough in-addr.arpa ip6.arpa
            ttl 30
        }
        prometheus :9153
        forward . /etc/resolv.conf {
            max_concurrent 1000
        }
        cache 30
        loop
        reload
        loadbalance
    }
...
      containers:
      - name: coredns
        image: registry.k8s.io/coredns/coredns:v1.9.3
        imagePullPolicy: IfNotPresent
        resources:
          limits:
            memory: 256Mi # memory limit; production typically uses 2-3Gi of memory and 2-4 CPU cores, and beyond that runs more replicas instead
          requests:
            cpu: 100m
            memory: 70Mi
...
spec:
  selector:
    k8s-app: kube-dns
  clusterIP: 10.96.0.10 # if you changed the k8s Service CIDR, set the CoreDNS Service IP to the 10th IP of that Service subnet (see the check below)
  ports:
  - name: dns
    port: 53
    protocol: UDP
  - name: dns-tcp
    port: 53
    protocol: TCP
  - name: metrics
    port: 9153
    protocol: TCP
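
Before applying the manifest, it is worth confirming that this Service IP matches the clusterDNS address the kubelets were configured with earlier in this series; a mismatch leaves Pods with a dead resolver. The command below should print 10.96.0.10 to match the clusterIP above:

[root@k8s-master01 ~]# grep -A1 clusterDNS /etc/kubernetes/kubelet-conf.yml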

Install CoreDNS

[root@k8s-master01 ~]# grep "image:" coredns.yaml 
        image: registry.k8s.io/coredns/coredns:v1.9.3
​
[root@k8s-master01 ~]# cat  download_coredns_images.sh 
#!/bin/bash
#
#**********************************************************************************************
#Author:        Raymond
#QQ:            88563128
#Date:          2022-01-11
#FileName:      download_coredns_images.sh
#URL:           raymond.blog.csdn.net
#Description:   The test script
#Copyright (C): 2022 All rights reserved
#*********************************************************************************************
COLOR="echo -e \033[01;31m"
END='\033[0m'
​
images=$(awk -F "/"  '/image:/{print $NF}' coredns.yaml)
HARBOR_DOMAIN=harbor.raymonds.cc
​
images_download(){
    ${COLOR}"开始下载Coredns镜像"${END}
    for i in ${images};do 
        docker pull registry.aliyuncs.com/google_containers/$i
        docker tag registry.aliyuncs.com/google_containers/$i ${HARBOR_DOMAIN}/google_containers/$i
        docker rmi registry.aliyuncs.com/google_containers/$i
        docker push ${HARBOR_DOMAIN}/google_containers/$i
    done
    ${COLOR}"Coredns镜像下载完成"${END}
}
​
images_download
​
[root@k8s-master01 ~]# bash download_coredns_images.sh
​
[root@k8s-master01 ~]# docker images |grep coredns
harbor.raymonds.cc/google_containers/coredns            v1.9.3    5185b96f0bec   3 months ago   48.8MB
​
[root@k8s-master01 ~]# sed -ri 's@(.*image:) .*coredns(/.*)@\1 harbor.raymonds.cc/google_containers\2@g' coredns.yaml
​
[root@k8s-master01 ~]# grep "image:" coredns.yaml 
        image: harbor.raymonds.cc/google_containers/coredns:v1.9.3
​
[root@k8s-master01 ~]# kubectl apply -f coredns.yaml
​
# Check the status
[root@k8s-master01 ~]# kubectl get pod -n kube-system |grep coredns
coredns-8668f8476d-2t5wj                   1/1     Running   0          16s

On Ubuntu the following problem can appear:

root@k8s-master01:~# kubectl get pod -A -o wide|grep coredns
NAMESPACE     NAME                                      READY   STATUS             RESTARTS   AGE   IP              NODE                         NOMINATED NODE   READINESS GATES
kube-system   coredns-847c895554-9jqq5                  0/1     CrashLoopBackOff   1          8s    192.171.30.65   k8s-master02.example.local   <none>           <none>
​
# Ubuntu runs a local DNS cache (stub resolver), which prevents CoreDNS from resolving normally
# For details see the official docs: https://coredns.io/plugins/loop/#troubleshooting
​
root@k8s-master01:~# kubectl edit -n kube-system cm coredns
...
# Please edit the object below. Lines beginning with a '#' will be ignored,
# and an empty file will abort the edit. If an error occurs while saving this file will be
# reopened with the relevant failures.
    .:53 {
        errors
        health {
          lameduck 5s
        }
        ready
        kubernetes cluster.local in-addr.arpa ip6.arpa {
          fallthrough in-addr.arpa ip6.arpa
        }
        prometheus :9153
        forward . /etc/resolv.conf {
          max_concurrent 1000
        }
        cache 30
        loop # delete this loop plugin line to avoid the internal forwarding loop
        reload
        loadbalance
    }
​
root@k8s-master01:~# kubectl get pod -A -o wide |grep coredns
NAMESPACE     NAME                                      READY   STATUS             RESTARTS   AGE    IP               NODE                         NOMINATED NODE   READINESS GATES
kube-system   coredns-847c895554-r9tsd                  0/1     CrashLoopBackOff   4          3m4s   192.170.21.195   k8s-node03.example.local     <none>           <none>
​
root@k8s-master01:~# kubectl delete pod coredns-847c895554-r9tsd -n kube-system 
pod "coredns-847c895554-r9tsd" deleted
​
root@k8s-master01:~# kubectl get pod -A -o wide |grep coredns
NAMESPACE     NAME                                      READY   STATUS    RESTARTS   AGE   IP                NODE                         NOMINATED NODE   READINESS GATES
kube-system   coredns-847c895554-cqwl5                  1/1     Running   0          13s   192.167.195.130   k8s-node02.example.local     <none>           <none>
# it is running normally now
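
Deleting the loop plugin only hides the symptom; the root cause on Ubuntu is that /etc/resolv.conf points at the local systemd-resolved stub (127.0.0.53), so CoreDNS ends up forwarding queries back to itself. An alternative sketch, assuming the kubelet-conf.yml used in this series carries a resolvConf field, is to point kubelet at the real upstream resolver file on every node and restart kubelet:

root@k8s-master01:~# sed -i 's@resolvConf: /etc/resolv.conf@resolvConf: /run/systemd/resolve/resolv.conf@' /etc/kubernetes/kubelet-conf.yml
root@k8s-master01:~# systemctl restart kubelet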

11. Installing Metrics Server

github.com/kubernetes-…

In recent Kubernetes versions, system resource metrics are collected by metrics-server, which can gather memory, disk, CPU, and network usage for nodes and Pods.

[root@k8s-master01 ~]# wget https://github.com/kubernetes-sigs/metrics-server/releases/latest/download/components.yaml

Modify the following:

[root@k8s-master01 ~]# vim components.yaml
...
    spec:
      containers:
      - args:
        - --cert-dir=/tmp
        - --secure-port=4443
        - --kubelet-preferred-address-types=InternalIP,ExternalIP,Hostname
        - --kubelet-use-node-status-port
        - --metric-resolution=15s
# add the lines below
        - --kubelet-insecure-tls
        - --requestheader-client-ca-file=/etc/kubernetes/pki/front-proxy-ca.pem # note: with a binary install the certificate file is front-proxy-ca.pem
        - --requestheader-username-headers=X-Remote-User
        - --requestheader-group-headers=X-Remote-Group
        - --requestheader-extra-headers-prefix=X-Remote-Extra-    
...
        volumeMounts:
        - mountPath: /tmp
          name: tmp-dir
# add the lines below
        - name: ca-ssl
          mountPath: /etc/kubernetes/pki
...
      volumes:
      - emptyDir: {}
        name: tmp-dir
# add the lines below
      - name: ca-ssl
        hostPath:
          path: /etc/kubernetes/pki 
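
Because the hostPath volume above is read on whichever node the metrics-server Pod gets scheduled to, /etc/kubernetes/pki/front-proxy-ca.pem must exist on every node; a quick check from master01:

[root@k8s-master01 ~]# for NODE in k8s-master02 k8s-master03 k8s-node01 k8s-node02 k8s-node03; do ssh $NODE ls /etc/kubernetes/pki/front-proxy-ca.pem; done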

Download the image and update the image address

[root@k8s-master01 ~]# grep "image:" components.yaml
        image: k8s.gcr.io/metrics-server/metrics-server:v0.6.1
​
[root@k8s-master01 ~]# cat download_metrics_images.sh 
#!/bin/bash
#
#**********************************************************************************************
#Author:        Raymond
#QQ:            88563128
#Date:          2022-01-11
#FileName:      download_metrics_images.sh
#URL:           raymond.blog.csdn.net
#Description:   The test script
#Copyright (C): 2022 All rights reserved
#*********************************************************************************************
COLOR="echo -e \033[01;31m"
END='\033[0m'
​
images=$(awk -F "/"  '/image:/{print $NF}' components.yaml)
HARBOR_DOMAIN=harbor.raymonds.cc
​
images_download(){
    ${COLOR}"开始下载Metrics镜像"${END}
    for i in ${images};do 
        docker pull registry.aliyuncs.com/google_containers/$i
        docker tag registry.aliyuncs.com/google_containers/$i ${HARBOR_DOMAIN}/google_containers/$i
        docker rmi registry.aliyuncs.com/google_containers/$i
        docker push ${HARBOR_DOMAIN}/google_containers/$i
    done
    ${COLOR}"Metrics镜像下载完成"${END}
}
​
images_download
​
[root@k8s-master01 ~]# bash download_metrics_images.sh
​
[root@k8s-master01 ~]# docker images |grep metrics
harbor.raymonds.cc/google_containers/metrics-server     v0.6.1    e57a417f15d3   7 months ago   68.8MB
​
[root@k8s-master01 ~]# sed -ri 's@(.*image:) .*metrics-server(/.*)@\1 harbor.raymonds.cc/google_containers\2@g' components.yaml
​
[root@k8s-master01 ~]# grep "image:" components.yaml
        image: harbor.raymonds.cc/google_containers/metrics-server:v0.6.1

Install metrics-server

[root@k8s-master01 ~]# kubectl apply -f components.yaml

Check the status

[root@k8s-master01 ~]# kubectl get pod -n kube-system |grep metrics
metrics-server-785dd7cc54-wzbdc            1/1     Running   0          31s
  
[root@k8s-master01 ~]# kubectl top node
NAME           CPU(cores)   CPU%   MEMORY(bytes)   MEMORY%   
172.31.3.101   224m         11%    1708Mi          47%       
172.31.3.102   206m         10%    997Mi           27%       
172.31.3.103   117m         5%     926Mi           25%       
172.31.3.111   124m         6%     643Mi           17%       
172.31.3.112   96m          4%     603Mi           16%       
172.31.3.113   90m          4%     606Mi           16%
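
If kubectl top returns errors instead of metrics, check whether the metrics APIService has become available; the AVAILABLE column should show True:

[root@k8s-master01 ~]# kubectl get apiservice v1beta1.metrics.k8s.io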

12. Installing Dashboard

12.1 Deploying Dashboard

Dashboard displays the cluster's various resources; it can also be used to view Pod logs in real time and execute commands inside containers.

github.com/kubernetes/…

Check which Kubernetes versions each Dashboard release supports:

[Figure: 022.jpg]

As shown above, Dashboard v2.7.0 supports Kubernetes 1.25.

[root@k8s-master01 ~]# wget https://raw.githubusercontent.com/kubernetes/dashboard/v2.7.0/aio/deploy/recommended.yaml

[root@k8s-master01 ~]# vim recommended.yaml 
...
kind: Service
apiVersion: v1
metadata:
  labels:
    k8s-app: kubernetes-dashboard
  name: kubernetes-dashboard
  namespace: kubernetes-dashboard
spec:
  type: NodePort # add this line
  ports:
    - port: 443
      targetPort: 8443
      nodePort: 30005 # add this line
  selector:
    k8s-app: kubernetes-dashboard
...

[root@k8s-master01 ~]# grep "image:" recommended.yaml
          image: kubernetesui/dashboard:v2.7.0
          image: kubernetesui/metrics-scraper:v1.0.8

Download the images and push them to Harbor

[root@k8s-master01 ~]# cat download_dashboard_images.sh 
#!/bin/bash
#
#**********************************************************************************************
#Author:        Raymond
#QQ:            88563128
#Date:          2022-01-11
#FileName:      download_dashboard_images.sh
#URL:           raymond.blog.csdn.net
#Description:   The test script
#Copyright (C): 2022 All rights reserved
#*********************************************************************************************
COLOR="echo -e \033[01;31m"
END='\033[0m'
​
images=$(awk -F "/"  '/image:/{print $NF}' recommended.yaml)
HARBOR_DOMAIN=harbor.raymonds.cc
​
images_download(){
    ${COLOR}"开始下载Dashboard镜像"${END}
    for i in ${images};do 
        docker pull registry.aliyuncs.com/google_containers/$i
        docker tag registry.aliyuncs.com/google_containers/$i ${HARBOR_DOMAIN}/google_containers/$i
        docker rmi registry.aliyuncs.com/google_containers/$i
        docker push ${HARBOR_DOMAIN}/google_containers/$i
    done
    ${COLOR}"Dashboard镜像下载完成"${END}
}
​
images_download
​
[root@k8s-master01 ~]# bash download_dashboard_images.sh
​
[root@k8s-master01 ~]# docker images |grep -E "(dashboard|metrics-scraper)"
harbor.raymonds.cc/google_containers/dashboard          v2.7.0    07655ddf2eeb   7 days ago     246MB
harbor.raymonds.cc/google_containers/metrics-scraper    v1.0.8    115053965e86   3 months ago   43.8MB
​
[root@k8s-master01 ~]# sed -ri 's@(.*image:) kubernetesui(/.*)@\1 harbor.raymonds.cc/google_containers\2@g' recommended.yaml
​
[root@k8s-master01 ~]# grep "image:" recommended.yaml
          image: harbor.raymonds.cc/google_containers/dashboard:v2.7.0
          image: harbor.raymonds.cc/google_containers/metrics-scraper:v1.0.8
          
[root@k8s-master01 ~]# kubectl  create -f recommended.yaml        

Create the admin user (admin.yaml):

[root@k8s-master01 ~]# cat > admin.yaml <<EOF
apiVersion: v1
kind: ServiceAccount
metadata:
  name: admin-user
  namespace: kubernetes-dashboard
---
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRoleBinding
metadata:
  name: admin-user
roleRef:
  apiGroup: rbac.authorization.k8s.io
  kind: ClusterRole
  name: cluster-admin
subjects:
- kind: ServiceAccount
  name: admin-user
  namespace: kubernetes-dashboard
EOF

[root@k8s-master01 ~]# kubectl apply -f admin.yaml

12.2 Logging in to Dashboard

Add the following startup parameters to the Google Chrome launcher to work around the certificate error that blocks access to Dashboard; see Figure 1-1:

--test-type --ignore-certificate-errors

[Figure: 023.jpg]

Figure 1-1: Google Chrome configuration

[root@k8s-master01 ~]# kubectl get svc kubernetes-dashboard -n kubernetes-dashboard
NAME                   TYPE       CLUSTER-IP       EXTERNAL-IP   PORT(S)         AGE
kubernetes-dashboard   NodePort   10.106.156.213   <none>        443:30005/TCP   83s

Access Dashboard at https://172.31.3.101:30005; see Figure 1-2.

[Figure: 024.jpg]

Figure 1-2: Dashboard login options

12.2.1 Logging in with a token

Create a token:

[root@k8s-master01 ~]# kubectl -n kubernetes-dashboard create token admin-user
eyJhbGciOiJSUzI1NiIsImtpZCI6Im5NTTViMlpDcy0zWGh2Tm5hbDA0N3ItZHFDWVRZUnpldW5pOFFSdHdsNTQifQ.eyJhdWQiOlsiaHR0cHM6Ly9rdWJlcm5ldGVzLmRlZmF1bHQuc3ZjLmNsdXN0ZXIubG9jYWwiXSwiZXhwIjoxNjYzOTQ0MTQ3LCJpYXQiOjE2NjM5NDA1NDcsImlzcyI6Imh0dHBzOi8va3ViZXJuZXRlcy5kZWZhdWx0LnN2Yy5jbHVzdGVyLmxvY2FsIiwia3ViZXJuZXRlcy5pbyI6eyJuYW1lc3BhY2UiOiJrdWJlcm5ldGVzLWRhc2hib2FyZCIsInNlcnZpY2VhY2NvdW50Ijp7Im5hbWUiOiJhZG1pbi11c2VyIiwidWlkIjoiMWQwZjU4MmEtMTZlZi00ZTc2LTk0MTYtMWQxYmVlMDViNTFjIn19LCJuYmYiOjE2NjM5NDA1NDcsInN1YiI6InN5c3RlbTpzZXJ2aWNlYWNjb3VudDprdWJlcm5ldGVzLWRhc2hib2FyZDphZG1pbi11c2VyIn0.K189UqhW2WkExVAxmebrcZZBLyOQUFIgrdmRglSAv6wJPnWxlK2QDdZ4aOtHt8D_FxhsalDfshzTBsbSM-Z-azoARinjJl8umIF8NIokOK38rFr5LcYm1B-oN97hIdnU9gjPZOcUzWCQU_lIdGIj-PTZCM3SKf-ursnofO6HEUlF0J47Q2MJn14z2AQv8BL9ivPU3mHKpIqFJYkyr4mhvUOtwRS1Tk5ZNxTsNJm5Ce_lqNgT_32JvStwMIxDHiMogWxhPabh8-S5RDhMqTvc4AgY1GffNRWmJ5wIsY7PIULeaCrPeC7TkE7M4lhduCR358f8J6G7KhsTb95CwumYOQ
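
The token created this way is short-lived (roughly an hour by default). If a non-expiring token is more convenient for this lab environment, a sketch using a service-account-token Secret (the Secret name admin-user-token is arbitrary):

[root@k8s-master01 ~]# cat <<EOF | kubectl apply -f -
apiVersion: v1
kind: Secret
metadata:
  name: admin-user-token
  namespace: kubernetes-dashboard
  annotations:
    kubernetes.io/service-account.name: admin-user
type: kubernetes.io/service-account-token
EOF

[root@k8s-master01 ~]# kubectl -n kubernetes-dashboard get secret admin-user-token -o go-template='{{.data.token | base64decode}}'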

Enter the token into the Token field, then click Sign in to access Dashboard; see Figure 1-3:

[Figure: 025.jpg]

[Figure: 026.jpg]

12.2.2 Logging in to Dashboard with a kubeconfig file

[root@k8s-master01 ~]# cp /etc/kubernetes/admin.kubeconfig kubeconfig
​
[root@k8s-master01 ~]# vim kubeconfig 
...
# add the token at the very bottom of the file
    token: eyJhbGciOiJSUzI1NiIsImtpZCI6Im5NTTViMlpDcy0zWGh2Tm5hbDA0N3ItZHFDWVRZUnpldW5pOFFSdHdsNTQifQ.eyJhdWQiOlsiaHR0cHM6Ly9rdWJlcm5ldGVzLmRlZmF1bHQuc3ZjLmNsdXN0ZXIubG9jYWwiXSwiZXhwIjoxNjYzOTQ0MTQ3LCJpYXQiOjE2NjM5NDA1NDcsImlzcyI6Imh0dHBzOi8va3ViZXJuZXRlcy5kZWZhdWx0LnN2Yy5jbHVzdGVyLmxvY2FsIiwia3ViZXJuZXRlcy5pbyI6eyJuYW1lc3BhY2UiOiJrdWJlcm5ldGVzLWRhc2hib2FyZCIsInNlcnZpY2VhY2NvdW50Ijp7Im5hbWUiOiJhZG1pbi11c2VyIiwidWlkIjoiMWQwZjU4MmEtMTZlZi00ZTc2LTk0MTYtMWQxYmVlMDViNTFjIn19LCJuYmYiOjE2NjM5NDA1NDcsInN1YiI6InN5c3RlbTpzZXJ2aWNlYWNjb3VudDprdWJlcm5ldGVzLWRhc2hib2FyZDphZG1pbi11c2VyIn0.K189UqhW2WkExVAxmebrcZZBLyOQUFIgrdmRglSAv6wJPnWxlK2QDdZ4aOtHt8D_FxhsalDfshzTBsbSM-Z-azoARinjJl8umIF8NIokOK38rFr5LcYm1B-oN97hIdnU9gjPZOcUzWCQU_lIdGIj-PTZCM3SKf-ursnofO6HEUlF0J47Q2MJn14z2AQv8BL9ivPU3mHKpIqFJYkyr4mhvUOtwRS1Tk5ZNxTsNJm5Ce_lqNgT_32JvStwMIxDHiMogWxhPabh8-S5RDhMqTvc4AgY1GffNRWmJ5wIsY7PIULeaCrPeC7TkE7M4lhduCR358f8J6G7KhsTb95CwumYOQ

[Figure: 027.jpg]

[Figure: 028.jpg]

13. Cluster Validation

Deploy busybox:

cat <<EOF | kubectl apply -f -
apiVersion: v1
kind: Pod
metadata:
  name: busybox
  namespace: default
spec:
  containers:
  - name: busybox
    image: busybox:1.28
    command:
      - sleep
      - "3600"
    imagePullPolicy: IfNotPresent
  restartPolicy: Always
EOF
  1. Pods must be able to resolve Services
  2. Pods must be able to resolve Services in other namespaces
  3. Every node must be able to reach the kubernetes Service on port 443 and the kube-dns Service on port 53
  4. Pods must be able to reach each other:

a) within the same namespace

b) across namespaces

c) across nodes

Verify resolution:

[root@k8s-master01 ~]# kubectl get svc
NAME         TYPE        CLUSTER-IP   EXTERNAL-IP   PORT(S)   AGE
kubernetes   ClusterIP   10.96.0.1    <none>        443/TCP   3h9m
​
# Pods must be able to resolve Services
[root@k8s-master01 ~]# kubectl exec  busybox -n default -- nslookup kubernetes
Server:    10.96.0.10
Address 1: 10.96.0.10 kube-dns.kube-system.svc.cluster.local
​
Name:      kubernetes
Address 1: 10.96.0.1 kubernetes.default.svc.cluster.local
​
# Pods must be able to resolve Services across namespaces
[root@k8s-master01 ~]# kubectl exec  busybox -n default -- nslookup kube-dns.kube-system
Server:    10.96.0.10
Address 1: 10.96.0.10 kube-dns.kube-system.svc.cluster.local
​
Name:      kube-dns.kube-system
Address 1: 10.96.0.10 kube-dns.kube-system.svc.cluster.local

Every node must be able to reach the kubernetes Service on port 443 and the kube-dns Service on port 53:

[root@k8s-master01 ~]# telnet 10.96.0.1 443
Trying 10.96.0.1...
Connected to 10.96.0.1.
Escape character is '^]'.
^CConnection closed by foreign host.
​
[root@k8s-master02 ~]# telnet 10.96.0.1 443
[root@k8s-master03 ~]# telnet 10.96.0.1 443
​
[root@k8s-node01 ~]# telnet 10.96.0.1 443
[root@k8s-node02 ~]# telnet 10.96.0.1 443
[root@k8s-node03 ~]# telnet 10.96.0.1 443
​
[root@k8s-master01 ~]# kubectl get svc -n kube-system
NAME             TYPE        CLUSTER-IP     EXTERNAL-IP   PORT(S)                  AGE
kube-dns         ClusterIP   10.96.0.10     <none>        53/UDP,53/TCP,9153/TCP   39m
metrics-server   ClusterIP   10.102.94.81   <none>        443/TCP                  28m
​
[root@k8s-master01 ~]# telnet 10.96.0.10 53
Trying 10.96.0.10...
Connected to 10.96.0.10.
Escape character is '^]'.
^CConnection closed by foreign host.
​
[root@k8s-master02 ~]# telnet 10.96.0.10 53
[root@k8s-master03 ~]# telnet 10.96.0.10 53
​
[root@k8s-node01 ~]# telnet 10.96.0.10 53
[root@k8s-node02 ~]# telnet 10.96.0.10 53
[root@k8s-node03 ~]# telnet 10.96.0.10 53
​
[root@k8s-master01 ~]# curl 10.96.0.10:53
curl: (52) Empty reply from server
​
[root@k8s-master02 ~]# curl 10.96.0.10:53
[root@k8s-master03 ~]# curl 10.96.0.10:53
​
[root@k8s-node01 ~]# curl 10.96.0.10:53
[root@k8s-node02 ~]# curl 10.96.0.10:53
[root@k8s-node03 ~]# curl 10.96.0.10:53

Pods must be able to reach each other:

[root@k8s-master01 ~]# kubectl get pod -n kube-system -o wide
NAME                                       READY   STATUS    RESTARTS   AGE   IP                NODE           NOMINATED NODE   READINESS GATES
calico-kube-controllers-67bd695c56-vvwn9   1/1     Running   0          96m   172.31.3.101      172.31.3.101   <none>           <none>
calico-node-4drls                          1/1     Running   0          96m   172.31.3.111      172.31.3.111   <none>           <none>
calico-node-8gfq4                          1/1     Running   0          96m   172.31.3.113      172.31.3.113   <none>           <none>
calico-node-ctx8q                          1/1     Running   0          96m   172.31.3.101      172.31.3.101   <none>           <none>
calico-node-cvqxn                          1/1     Running   0          96m   172.31.3.103      172.31.3.103   <none>           <none>
calico-node-g77nk                          1/1     Running   0          96m   172.31.3.102      172.31.3.102   <none>           <none>
calico-node-nnn5l                          1/1     Running   0          96m   172.31.3.112      172.31.3.112   <none>           <none>
coredns-8668f8476d-2t5wj                   1/1     Running   0          41m   192.171.30.65     172.31.3.102   <none>           <none>
metrics-server-785dd7cc54-wzbdc            1/1     Running   0          30m   192.169.111.129   172.31.3.111   <none>           <none>
​
[root@k8s-master01 ~]# kubectl get pod  -o wide
NAME      READY   STATUS    RESTARTS   AGE     IP               NODE           NOMINATED NODE   READINESS GATES
busybox   1/1     Running   0          4m54s   192.170.21.193   172.31.3.113   <none>           <none>
​
[root@k8s-master01 ~]# kubectl exec -it busybox -- sh
/ # ping 192.169.111.129
PING 192.169.111.129 (192.169.111.129): 56 data bytes
64 bytes from 192.169.111.129: seq=0 ttl=62 time=1.241 ms
64 bytes from 192.169.111.129: seq=1 ttl=62 time=0.810 ms
^C
--- 192.169.111.129 ping statistics ---
2 packets transmitted, 2 packets received, 0% packet loss
round-trip min/avg/max = 0.810/1.025/1.241 ms
/ # exit
​
[root@k8s-master01 ~]# kubectl create deploy nginx --image=nginx --replicas=3
deployment.apps/nginx created
​
[root@k8s-master01 ~]# kubectl get deploy
NAME    READY   UP-TO-DATE   AVAILABLE   AGE
nginx   3/3     3            3           93s
[root@k8s-master01 ~]# kubectl get pod -o wide |grep nginx
nginx-76d6c9b8c-g29fr   1/1     Running   0          116s    192.162.55.65     172.31.3.101   <none>           <none>
nginx-76d6c9b8c-pf9pm   1/1     Running   0          116s    192.169.111.130   172.31.3.111   <none>           <none>
nginx-76d6c9b8c-xphs2   1/1     Running   0          116s    192.170.21.194    172.31.3.113   <none>           <none>
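
Optionally, confirm cross-node Pod-to-Pod traffic over TCP as well, using one of the nginx Pod IPs listed above that sits on a different node than busybox (192.169.111.130 on 172.31.3.111 here):

[root@k8s-master01 ~]# kubectl exec busybox -- wget -qO- 192.169.111.130 | head -n 4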
​
[root@k8s-master01 ~]# kubectl delete deploy nginx
deployment.apps "nginx" deleted
[root@k8s-master01 ~]# kubectl delete pod busybox
pod "busybox" deleted

14. Key Production Configurations

Docker parameter configuration:

vim /etc/docker/daemon.json
{  
    "registry-mirrors": [ # Docker registry mirrors (pull acceleration)
    "https://registry.docker-cn.com",
    "http://hub-mirror.c.163.com",
    "https://docker.mirrors.ustc.edu.cn"
    ],
    "exec-opts": ["native.cgroupdriver=systemd"], # k8s requires docker to use the systemd cgroup driver
    "max-concurrent-downloads": 10, # concurrent download threads
    "max-concurrent-uploads": 5, # concurrent upload threads
    "log-opts": {
        "max-size": "300m", # at most 300m per docker log file
        "max-file": "2" # at most 2 log files
    },
    "live-restore": true # containers keep running across docker daemon restarts
}
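
JSON does not allow comments, so the annotated block above is for explanation only. A copy-paste-ready version with the same settings (a sketch; note that until live-restore is active, restarting docker will restart the running containers):

cat > /etc/docker/daemon.json <<'EOF'
{
    "registry-mirrors": [
        "https://registry.docker-cn.com",
        "http://hub-mirror.c.163.com",
        "https://docker.mirrors.ustc.edu.cn"
    ],
    "exec-opts": ["native.cgroupdriver=systemd"],
    "max-concurrent-downloads": 10,
    "max-concurrent-uploads": 5,
    "log-opts": {
        "max-size": "300m",
        "max-file": "2"
    },
    "live-restore": true
}
EOF
systemctl restart docker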

kube-controller-manager parameter configuration:

[root@k8s-master01 ~]# vim /lib/systemd/system/kube-controller-manager.service
# --feature-gates=RotateKubeletClientCertificate=true,RotateKubeletServerCertificate=true \ # bootstrap auto-issues certificates; this defaults to true in recent versions, so there is no need to set it
      --cluster-signing-duration=876000h0m0s \ # controls the validity period of the certificates it signs

[root@k8s-master01 ~]# for NODE in k8s-master02 k8s-master03; do scp -o StrictHostKeyChecking=no /lib/systemd/system/kube-controller-manager.service $NODE:/lib/systemd/system/; done
kube-controller-manager.service                                                                              100% 1113   670.4KB/s   00:00    
kube-controller-manager.service                                                                              100% 1113     1.0MB/s   00:00
​
[root@k8s-master01 ~]# systemctl daemon-reload && systemctl restart kube-controller-manager
[root@k8s-master02 ~]# systemctl daemon-reload && systemctl restart kube-controller-manager
[root@k8s-master03 ~]# systemctl daemon-reload && systemctl restart kube-controller-manager

10-kubelet.conf parameter configuration:

[root@k8s-master01 ~]# vim /etc/systemd/system/kubelet.service.d/10-kubelet.conf
Environment="KUBELET_EXTRA_ARGS=--node-labels=node.kubernetes.io/node='' --tls-cipher-suites=TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256,TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384 --image-pull-progress-deadline=30m"

#--tls-cipher-suites=TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256,TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384  # restricts the TLS cipher suites k8s uses
#--image-pull-progress-deadline=30m  # cancel an image pull if it makes no progress before this deadline; this docker-specific flag only takes effect when the container runtime is docker

[root@k8s-master01 ~]# for NODE in k8s-master02 k8s-master03 k8s-node01 k8s-node02 k8s-node03; do scp -o StrictHostKeyChecking=no /etc/systemd/system/kubelet.service.d/10-kubelet.conf $NODE:/etc/systemd/system/kubelet.service.d/ ;done

kubelet-conf.yml parameter configuration:

[root@k8s-master01 ~]# vim /etc/kubernetes/kubelet-conf.yml
# add the following configuration
rotateServerCertificates: true
allowedUnsafeSysctls: # allow containers to set kernel parameters; this carries security risk, so configure it according to actual need
 - "net.core*"
 - "net.ipv4.*"
kubeReserved: # resources reserved for k8s components
  cpu: "1"
  memory: 1Gi
  ephemeral-storage: 10Gi
systemReserved: # resources reserved for the system
  cpu: "1"
  memory: 1Gi
  ephemeral-storage: 10Gi

#rotateServerCertificates: true  # when the serving certificate is about to expire, automatically request a new one from kube-apiserver and rotate to it; requires the RotateKubeletServerCertificate feature gate and approval of the resulting CertificateSigningRequest objects
  
[root@k8s-master01 ~]# for NODE in k8s-master02 k8s-master03 k8s-node01 k8s-node02 k8s-node03; do scp /etc/kubernetes/kubelet-conf.yml $NODE:/etc/kubernetes/ ;done
​
[root@k8s-master01 ~]# systemctl daemon-reload && systemctl restart kubelet
[root@k8s-master02 ~]# systemctl daemon-reload && systemctl restart kubelet
[root@k8s-master03 ~]# systemctl daemon-reload && systemctl restart kubelet
[root@k8s-node01 ~]# systemctl daemon-reload && systemctl restart kubelet
[root@k8s-node02 ~]# systemctl daemon-reload && systemctl restart kubelet
[root@k8s-node03 ~]# systemctl daemon-reload && systemctl restart kubelet
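
To confirm the reservations took effect, compare Capacity and Allocatable on a node; Allocatable should be lower by roughly the kubeReserved plus systemReserved amounts:

[root@k8s-master01 ~]# kubectl describe node 172.31.3.101 | grep -A 7 -E "^(Capacity|Allocatable)"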

Add labels:

[root@k8s-master01 ~]# kubectl get nodes
NAME           STATUS   ROLES    AGE    VERSION
172.31.3.101   Ready    <none>   151m   v1.25.2
172.31.3.102   Ready    <none>   150m   v1.25.2
172.31.3.103   Ready    <none>   150m   v1.25.2
172.31.3.111   Ready    <none>   124m   v1.25.2
172.31.3.112   Ready    <none>   124m   v1.25.2
172.31.3.113   Ready    <none>   124m   v1.25.2
​
[root@k8s-master01 ~]# kubectl get node --show-labels
NAME           STATUS   ROLES    AGE    VERSION   LABELS
172.31.3.101   Ready    <none>   151m   v1.25.2   beta.kubernetes.io/arch=amd64,beta.kubernetes.io/os=linux,kubernetes.io/arch=amd64,kubernetes.io/hostname=172.31.3.101,kubernetes.io/os=linux,node.kubernetes.io/node=
172.31.3.102   Ready    <none>   150m   v1.25.2   beta.kubernetes.io/arch=amd64,beta.kubernetes.io/os=linux,kubernetes.io/arch=amd64,kubernetes.io/hostname=172.31.3.102,kubernetes.io/os=linux,node.kubernetes.io/node=
172.31.3.103   Ready    <none>   150m   v1.25.2   beta.kubernetes.io/arch=amd64,beta.kubernetes.io/os=linux,kubernetes.io/arch=amd64,kubernetes.io/hostname=172.31.3.103,kubernetes.io/os=linux,node.kubernetes.io/node=
172.31.3.111   Ready    <none>   124m   v1.25.2   beta.kubernetes.io/arch=amd64,beta.kubernetes.io/os=linux,kubernetes.io/arch=amd64,kubernetes.io/hostname=172.31.3.111,kubernetes.io/os=linux,node.kubernetes.io/node=
172.31.3.112   Ready    <none>   124m   v1.25.2   beta.kubernetes.io/arch=amd64,beta.kubernetes.io/os=linux,kubernetes.io/arch=amd64,kubernetes.io/hostname=172.31.3.112,kubernetes.io/os=linux,node.kubernetes.io/node=
172.31.3.113   Ready    <none>   124m   v1.25.2   beta.kubernetes.io/arch=amd64,beta.kubernetes.io/os=linux,kubernetes.io/arch=amd64,kubernetes.io/hostname=172.31.3.113,kubernetes.io/os=linux,node.kubernetes.io/node=
​
[root@k8s-master01 ~]# kubectl label node 172.31.3.101 node-role.kubernetes.io/control-plane='' node-role.kubernetes.io/master=''
node/172.31.3.101 labeled
​
[root@k8s-master01 ~]# kubectl get nodes
NAME           STATUS   ROLES                  AGE    VERSION
172.31.3.101   Ready    control-plane,master   152m   v1.25.2
172.31.3.102   Ready    <none>                 151m   v1.25.2
172.31.3.103   Ready    <none>                 151m   v1.25.2
172.31.3.111   Ready    <none>                 125m   v1.25.2
172.31.3.112   Ready    <none>                 125m   v1.25.2
172.31.3.113   Ready    <none>                 125m   v1.25.2
​
[root@k8s-master01 ~]# kubectl label node 172.31.3.102 node-role.kubernetes.io/control-plane='' node-role.kubernetes.io/master=''
node/172.31.3.102 labeled
​
[root@k8s-master01 ~]# kubectl label node 172.31.3.103 node-role.kubernetes.io/control-plane='' node-role.kubernetes.io/master=''
node/172.31.3.103 labeled
​
[root@k8s-master01 ~]# kubectl get nodes
NAME           STATUS   ROLES                  AGE    VERSION
172.31.3.101   Ready    control-plane,master   152m   v1.25.2
172.31.3.102   Ready    control-plane,master   152m   v1.25.2
172.31.3.103   Ready    control-plane,master   151m   v1.25.2
172.31.3.111   Ready    <none>                 125m   v1.25.2
172.31.3.112   Ready    <none>                 125m   v1.25.2
172.31.3.113   Ready    <none>                 125m   v1.25.2

Installation summary:

1. kubeadm

2. Binary installation

3. Automated installation

a) Ansible

i. Master installation does not need to be automated.

ii. Adding Node nodes: use a playbook.

4. Details to watch during installation

a) The detailed settings above

b) In production, etcd must be kept off the system disk and must run on SSDs.

c) The Docker data disk should also be separate from the system disk; use SSDs if possible.