7. Deploying Docker
Install Docker (from the static binary package) on all master and node machines:
[root@k8s-master01 ~]# cat install_docker_binary.sh
#!/bin/bash
#
#**********************************************************************************************
#Author: Raymond
#QQ: 88563128
#Date: 2021-12-07
#FileName: install_docker_binary.sh
#URL: raymond.blog.csdn.net
#Description: install_docker_binary for centos 7/8 & ubuntu 18.04/20.04 & Rocky 8
#Copyright (C): 2021 All rights reserved
#*********************************************************************************************
SRC_DIR=/usr/local/src
COLOR="echo -e \\033[01;31m"
END='\033[0m'
URL='https://mirrors.cloud.tencent.com/docker-ce/linux/static/stable/x86_64/'
DOCKER_FILE=docker-20.10.12.tgz
HARBOR_DOMAIN=harbor.raymonds.cc
os(){
OS_ID=`sed -rn '/^NAME=/s@.*="([[:alpha:]]+).*"$@\1@p' /etc/os-release`
}
check_file (){
cd ${SRC_DIR}
rpm -q wget &> /dev/null || yum -y install wget &> /dev/null
if [ ! -e ${DOCKER_FILE} ];then
${COLOR}"缺少${DOCKER_FILE}文件,如果是离线包,请把文件放到${SRC_DIR}目录下"${END}
${COLOR}'开始下载DOCKER二进制安装包'${END}
wget ${URL}${DOCKER_FILE} || { ${COLOR}"DOCKER二进制安装包下载失败"${END}; exit; }
else
${COLOR}"相关文件已准备好"${END}
fi
}
install(){
[ -f /usr/bin/docker ] && { ${COLOR}"Docker is already installed; aborting"${END};exit; }
${COLOR}"Installing Docker..."${END}
tar xf ${DOCKER_FILE}
mv docker/* /usr/bin/
cat > /lib/systemd/system/docker.service <<-EOF
[Unit]
Description=Docker Application Container Engine
Documentation=https://docs.docker.com
After=network-online.target firewalld.service
Wants=network-online.target
[Service]
Type=notify
# the default is not to use systemd for cgroups because the delegate issues still
# exist and systemd currently does not support the cgroup feature set required
# for containers run by docker
ExecStart=/usr/bin/dockerd -H unix:///var/run/docker.sock
ExecReload=/bin/kill -s HUP \$MAINPID
# Having non-zero Limit*s causes performance problems due to accounting overhead
# in the kernel. We recommend using cgroups to do container-local accounting.
LimitNOFILE=infinity
LimitNPROC=infinity
LimitCORE=infinity
# Uncomment TasksMax if your systemd version supports it.
# Only systemd 226 and above support this version.
#TasksMax=infinity
TimeoutStartSec=0
# set delegate yes so that systemd does not reset the cgroups of docker containers
Delegate=yes
# kill only the docker process, not all processes in the cgroup
KillMode=process
# restart the docker process if it exits prematurely
Restart=on-failure
StartLimitBurst=3
StartLimitInterval=60s
[Install]
WantedBy=multi-user.target
EOF
mkdir -p /etc/docker
cat > /etc/docker/daemon.json <<-EOF
{
  "registry-mirrors": [
    "https://registry.docker-cn.com",
    "http://hub-mirror.c.163.com",
    "https://docker.mirrors.ustc.edu.cn"
  ],
  "insecure-registries": ["${HARBOR_DOMAIN}"],
  "exec-opts": ["native.cgroupdriver=systemd"],
  "max-concurrent-downloads": 10,
  "max-concurrent-uploads": 5,
  "log-opts": {
    "max-size": "300m",
    "max-file": "2"
  },
  "live-restore": true
}
EOF
echo 'alias rmi="docker images -qa|xargs docker rmi -f"' >> ~/.bashrc
echo 'alias rmc="docker ps -qa|xargs docker rm -f"' >> ~/.bashrc
systemctl daemon-reload
systemctl enable --now docker &> /dev/null
systemctl is-active docker &> /dev/null && ${COLOR}"Docker service started successfully"${END} || { ${COLOR}"Docker failed to start"${END};exit; }
docker version && ${COLOR}"Docker installed successfully"${END} || ${COLOR}"Docker installation failed"${END}
}
set_swap_limit(){
if [ ${OS_ID} == "Ubuntu" ];then
${COLOR}'Fixing the Docker "WARNING: No swap limit support" warning'${END}
sed -ri '/^GRUB_CMDLINE_LINUX=/s@"$@ swapaccount=1"@' /etc/default/grub
update-grub &> /dev/null
${COLOR}"10秒后,机器会自动重启"${END}
sleep 10
reboot
fi
}
main(){
os
check_file
install
set_swap_limit
}
main
[root@k8s-master01 ~]# bash install_docker_binary.sh
[root@k8s-master02 ~]# bash install_docker_binary.sh
[root@k8s-master03 ~]# bash install_docker_binary.sh
[root@k8s-node01 ~]# bash install_docker_binary.sh
[root@k8s-node02 ~]# bash install_docker_binary.sh
[root@k8s-node03 ~]# bash install_docker_binary.sh
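kubelet requires the container runtime's cgroup driver to match its own; this deployment uses systemd for both (native.cgroupdriver=systemd in daemon.json above, cgroupDriver: systemd in the kubelet config later). As a quick sanity check on each machine, docker info should report Cgroup Driver: systemd:
[root@k8s-master01 ~]# docker info | grep -i 'cgroup driver'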
8. Deploying the masters
8.1 Creating the etcd directories and copying the etcd certificates
Create the etcd certificate directory on the master nodes
[root@k8s-master01 ~]# mkdir /etc/etcd/ssl -p
[root@k8s-master02 ~]# mkdir /etc/etcd/ssl -p
[root@k8s-master03 ~]# mkdir /etc/etcd/ssl -p
Copy the etcd certificates to the master nodes
[root@k8s-etcd01 pki]# for NODE in k8s-master01 k8s-master02 k8s-master03; do
ssh -o StrictHostKeyChecking=no $NODE "mkdir -p /etc/etcd/ssl"
for FILE in etcd-ca-key.pem etcd-ca.pem etcd-key.pem etcd.pem; do
scp -o StrictHostKeyChecking=no /etc/etcd/ssl/${FILE} $NODE:/etc/etcd/ssl/${FILE}
done
done
Create the etcd certificate directory under the Kubernetes PKI path on all master nodes
[root@k8s-master01 ~]# mkdir /etc/kubernetes/pki/etcd -p
[root@k8s-master02 ~]# mkdir /etc/kubernetes/pki/etcd -p
[root@k8s-master03 ~]# mkdir /etc/kubernetes/pki/etcd -p
[root@k8s-master01 ~]# ln -s /etc/etcd/ssl/* /etc/kubernetes/pki/etcd/
[root@k8s-master02 ~]# ln -s /etc/etcd/ssl/* /etc/kubernetes/pki/etcd/
[root@k8s-master03 ~]# ln -s /etc/etcd/ssl/* /etc/kubernetes/pki/etcd/
8.2 Installing the Kubernetes components
Download the Kubernetes server tarball
[root@k8s-master01 ~]# wget https://dl.k8s.io/v1.21.8/kubernetes-server-linux-amd64.tar.gz
To use the newest 1.21.x patch release instead, replace v1.21.8 in the download URL with the desired version.
Unpack the Kubernetes server binaries
[root@k8s-master01 ~]# tar -xf kubernetes-server-linux-amd64.tar.gz --strip-components=3 -C /usr/local/bin kubernetes/server/bin/kube{let,ctl,-apiserver,-controller-manager,-scheduler,-proxy}
Check the version
[root@k8s-master01 ~]# kubelet --version
Kubernetes v1.21.8
Send the components to the other master nodes
[root@k8s-master01 ~]# for NODE in k8s-master02 k8s-master03; do echo $NODE; scp -o StrictHostKeyChecking=no /usr/local/bin/kube{let,ctl,-apiserver,-controller-manager,-scheduler,-proxy} $NODE:/usr/local/bin/; done
Create the /opt/cni/bin directory on the master nodes
[root@k8s-master01 ~]# mkdir -p /opt/cni/bin
[root@k8s-master02 ~]# mkdir -p /opt/cni/bin
[root@k8s-master03 ~]# mkdir -p /opt/cni/bin
8.3 Generating the Kubernetes component certificates
This is the most critical part of a binary installation: a single mistake here can break the whole cluster, so make sure every step is correct.
Create the Kubernetes directories on the master nodes
[root@k8s-master01 ~]# mkdir -p /etc/kubernetes/pki
[root@k8s-master02 ~]# mkdir -p /etc/kubernetes/pki
[root@k8s-master03 ~]# mkdir -p /etc/kubernetes/pki
Download the certificate generation tools on master01
[root@k8s-master01 ~]# wget "https://pkg.cfssl.org/R1.2/cfssl_linux-amd64" -O /usr/local/bin/cfssl
[root@k8s-master01 ~]# wget "https://pkg.cfssl.org/R1.2/cfssljson_linux-amd64" -O /usr/local/bin/cfssljson
[root@k8s-master01 ~]# chmod +x /usr/local/bin/cfssl /usr/local/bin/cfssljson
8.3.1 Generating the CA certificate
[root@k8s-master01 ~]# mkdir pki
[root@k8s-master01 ~]# cd pki/
[root@k8s-master01 pki]# cat ca-csr.json
{
  "CN": "kubernetes",
  "key": {
    "algo": "rsa",
    "size": 2048
  },
  "names": [
    {
      "C": "CN",
      "ST": "Beijing",
      "L": "Beijing",
      "O": "Kubernetes",
      "OU": "Kubernetes-manual"
    }
  ],
  "ca": {
    "expiry": "876000h"
  }
}
[root@k8s-master01 pki]# cfssl gencert -initca ca-csr.json | cfssljson -bare /etc/kubernetes/pki/ca
#Output
2022/02/25 16:19:52 [INFO] generating a new CA key and certificate from CSR
2022/02/25 16:19:52 [INFO] generate received request
2022/02/25 16:19:52 [INFO] received CSR
2022/02/25 16:19:52 [INFO] generating key: rsa-2048
2022/02/25 16:19:52 [INFO] encoded CSR
2022/02/25 16:19:52 [INFO] signed certificate with serial number 50700045204155982111782984381054779655420622936
[root@k8s-master01 pki]# ll /etc/kubernetes/pki/ca*
-rw-r--r-- 1 root root 1025 Feb 25 16:19 /etc/kubernetes/pki/ca.csr
-rw------- 1 root root 1675 Feb 25 16:19 /etc/kubernetes/pki/ca-key.pem
-rw-r--r-- 1 root root 1411 Feb 25 16:19 /etc/kubernetes/pki/ca.pem
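Optionally, sanity-check the new CA before signing anything with it; openssl can print the subject and the validity window (the 876000h expiry above is roughly 100 years):
[root@k8s-master01 pki]# openssl x509 -in /etc/kubernetes/pki/ca.pem -noout -subject -dates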
8.3.2 Generating the apiserver certificate
# 10.96.0.1 is the first address of the Kubernetes service CIDR (10.96.0.0/12); if you change the service CIDR, change 10.96.0.1 accordingly
# If this is not a highly available cluster, 172.31.3.188 should be Master01's IP
[root@k8s-master01 pki]# cat ca-config.json
{
  "signing": {
    "default": {
      "expiry": "876000h"
    },
    "profiles": {
      "kubernetes": {
        "usages": [
          "signing",
          "key encipherment",
          "server auth",
          "client auth"
        ],
        "expiry": "876000h"
      }
    }
  }
}
[root@k8s-master01 pki]# cat apiserver-csr.json
{
  "CN": "kube-apiserver",
  "key": {
    "algo": "rsa",
    "size": 2048
  },
  "names": [
    {
      "C": "CN",
      "ST": "Beijing",
      "L": "Beijing",
      "O": "Kubernetes",
      "OU": "Kubernetes-manual"
    }
  ]
}
[root@k8s-master01 pki]# cfssl gencert -ca=/etc/kubernetes/pki/ca.pem -ca-key=/etc/kubernetes/pki/ca-key.pem -config=ca-config.json -hostname=10.96.0.1,172.31.3.188,127.0.0.1,kubernetes,kubernetes.default,kubernetes.default.svc,kubernetes.default.svc.cluster,kubernetes.default.svc.cluster.local,172.31.3.101,172.31.3.102,172.31.3.103 -profile=kubernetes apiserver-csr.json | cfssljson -bare /etc/kubernetes/pki/apiserver
#Output
2022/02/25 16:21:42 [INFO] generate received request
2022/02/25 16:21:42 [INFO] received CSR
2022/02/25 16:21:42 [INFO] generating key: rsa-2048
2022/02/25 16:21:42 [INFO] encoded CSR
2022/02/25 16:21:42 [INFO] signed certificate with serial number 14326114816925312981811634565226925868722808544
[root@k8s-master01 pki]# ll /etc/kubernetes/pki/apiserver*
-rw-r--r-- 1 root root 1029 Feb 25 16:21 /etc/kubernetes/pki/apiserver.csr
-rw------- 1 root root 1675 Feb 25 16:21 /etc/kubernetes/pki/apiserver-key.pem
-rw-r--r-- 1 root root 1692 Feb 25 16:21 /etc/kubernetes/pki/apiserver.pem
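The apiserver certificate must cover every name and address used to reach it: the first service IP 10.96.0.1, the VIP 172.31.3.188, the three master IPs and the kubernetes.* DNS names passed via -hostname above. To confirm they all made it into the signed certificate, inspect its SANs:
[root@k8s-master01 pki]# openssl x509 -in /etc/kubernetes/pki/apiserver.pem -noout -text | grep -A1 'Subject Alternative Name'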
8.3.3 Generating the apiserver aggregation certificates
Generate the front-proxy (aggregation layer) CA and client certificates; they back the apiserver's requestheader-client-ca-file and requestheader-allowed-names=aggregator settings.
[root@k8s-master01 pki]# cat front-proxy-ca-csr.json
{
  "CN": "kubernetes",
  "key": {
    "algo": "rsa",
    "size": 2048
  }
}
[root@k8s-master01 pki]# cfssl gencert -initca front-proxy-ca-csr.json | cfssljson -bare /etc/kubernetes/pki/front-proxy-ca
#Output
2022/02/25 16:23:07 [INFO] generating a new CA key and certificate from CSR
2022/02/25 16:23:07 [INFO] generate received request
2022/02/25 16:23:07 [INFO] received CSR
2022/02/25 16:23:07 [INFO] generating key: rsa-2048
2022/02/25 16:23:07 [INFO] encoded CSR
2022/02/25 16:23:07 [INFO] signed certificate with serial number 60929528331736839052879833998406013639330884564
[root@k8s-master01 pki]# ll /etc/kubernetes/pki/front-proxy-ca*
-rw-r--r-- 1 root root 891 Feb 25 16:23 /etc/kubernetes/pki/front-proxy-ca.csr
-rw------- 1 root root 1679 Feb 25 16:23 /etc/kubernetes/pki/front-proxy-ca-key.pem
-rw-r--r-- 1 root root 1143 Feb 25 16:23 /etc/kubernetes/pki/front-proxy-ca.pem
[root@k8s-master01 pki]# cat ca-config.json
{
  "signing": {
    "default": {
      "expiry": "876000h"
    },
    "profiles": {
      "kubernetes": {
        "usages": [
          "signing",
          "key encipherment",
          "server auth",
          "client auth"
        ],
        "expiry": "876000h"
      }
    }
  }
}
[root@k8s-master01 pki]# cat front-proxy-client-csr.json
{
  "CN": "front-proxy-client",
  "key": {
    "algo": "rsa",
    "size": 2048
  }
}
[root@k8s-master01 pki]# cfssl gencert -ca=/etc/kubernetes/pki/front-proxy-ca.pem -ca-key=/etc/kubernetes/pki/front-proxy-ca-key.pem -config=ca-config.json -profile=kubernetes front-proxy-client-csr.json | cfssljson -bare /etc/kubernetes/pki/front-proxy-client
#Output (ignore the warning)
2022/02/25 16:24:29 [INFO] generate received request
2022/02/25 16:24:29 [INFO] received CSR
2022/02/25 16:24:29 [INFO] generating key: rsa-2048
2022/02/25 16:24:29 [INFO] encoded CSR
2022/02/25 16:24:29 [INFO] signed certificate with serial number 625247142890350892356758545319462713918431205897
2022/02/25 16:24:29 [WARNING] This certificate lacks a "hosts" field. This makes it unsuitable for
websites. For more information see the Baseline Requirements for the Issuance and Management
of Publicly-Trusted Certificates, v.1.1.6, from the CA/Browser Forum (https://cabforum.org);
specifically, section 10.2.3 ("Information Requirements").
[root@k8s-master01 pki]# ll /etc/kubernetes/pki/front-proxy-client*
-rw-r--r-- 1 root root 903 Feb 25 16:24 /etc/kubernetes/pki/front-proxy-client.csr
-rw------- 1 root root 1675 Feb 25 16:24 /etc/kubernetes/pki/front-proxy-client-key.pem
-rw-r--r-- 1 root root 1188 Feb 25 16:24 /etc/kubernetes/pki/front-proxy-client.pem
8.3.4 Generating the controller-manager certificate and kubeconfig
[root@k8s-master01 pki]# cat ca-config.json
{
  "signing": {
    "default": {
      "expiry": "876000h"
    },
    "profiles": {
      "kubernetes": {
        "usages": [
          "signing",
          "key encipherment",
          "server auth",
          "client auth"
        ],
        "expiry": "876000h"
      }
    }
  }
}
[root@k8s-master01 pki]# cat manager-csr.json
{
  "CN": "system:kube-controller-manager",
  "key": {
    "algo": "rsa",
    "size": 2048
  },
  "names": [
    {
      "C": "CN",
      "ST": "Beijing",
      "L": "Beijing",
      "O": "system:kube-controller-manager",
      "OU": "Kubernetes-manual"
    }
  ]
}
[root@k8s-master01 pki]# cfssl gencert \
-ca=/etc/kubernetes/pki/ca.pem \
-ca-key=/etc/kubernetes/pki/ca-key.pem \
-config=ca-config.json \
-profile=kubernetes \
manager-csr.json | cfssljson -bare /etc/kubernetes/pki/controller-manager
#Output
2022/02/25 16:30:55 [INFO] generate received request
2022/02/25 16:30:55 [INFO] received CSR
2022/02/25 16:30:55 [INFO] generating key: rsa-2048
2022/02/25 16:30:56 [INFO] encoded CSR
2022/02/25 16:30:56 [INFO] signed certificate with serial number 603818792655902270143954916469072941384696164716
2022/02/25 16:30:56 [WARNING] This certificate lacks a "hosts" field. This makes it unsuitable for
websites. For more information see the Baseline Requirements for the Issuance and Management
of Publicly-Trusted Certificates, v.1.1.6, from the CA/Browser Forum (https://cabforum.org);
specifically, section 10.2.3 ("Information Requirements").
[root@k8s-master01 pki]# ll /etc/kubernetes/pki/controller-manager*
-rw-r--r-- 1 root root 1082 Feb 25 16:30 /etc/kubernetes/pki/controller-manager.csr
-rw------- 1 root root 1675 Feb 25 16:30 /etc/kubernetes/pki/controller-manager-key.pem
-rw-r--r-- 1 root root 1501 Feb 25 16:30 /etc/kubernetes/pki/controller-manager.pem
# Note: if this is not a highly available cluster, change 172.31.3.188:6443 to Master01's address
# set-cluster: define a cluster entry
[root@k8s-master01 pki]# kubectl config set-cluster kubernetes \
--certificate-authority=/etc/kubernetes/pki/ca.pem \
--embed-certs=true \
--server=https://172.31.3.188:6443 \
--kubeconfig=/etc/kubernetes/controller-manager.kubeconfig
#Output
Cluster "kubernetes" set.
# set-credentials: define a user entry
[root@k8s-master01 pki]# kubectl config set-credentials system:kube-controller-manager \
--client-certificate=/etc/kubernetes/pki/controller-manager.pem \
--client-key=/etc/kubernetes/pki/controller-manager-key.pem \
--embed-certs=true \
--kubeconfig=/etc/kubernetes/controller-manager.kubeconfig
#Output
User "system:kube-controller-manager" set.
# set-context: define a context binding the cluster and the user
[root@k8s-master01 pki]# kubectl config set-context system:kube-controller-manager@kubernetes \
--cluster=kubernetes \
--user=system:kube-controller-manager \
--kubeconfig=/etc/kubernetes/controller-manager.kubeconfig
#Output
Context "system:kube-controller-manager@kubernetes" created.
# use-context: make this context the default
[root@k8s-master01 pki]# kubectl config use-context system:kube-controller-manager@kubernetes \
--kubeconfig=/etc/kubernetes/controller-manager.kubeconfig
#Output
Switched to context "system:kube-controller-manager@kubernetes".
8.3.5 Generating the scheduler certificate and kubeconfig
[root@k8s-master01 pki]# cat scheduler-csr.json
{
  "CN": "system:kube-scheduler",
  "key": {
    "algo": "rsa",
    "size": 2048
  },
  "names": [
    {
      "C": "CN",
      "ST": "Beijing",
      "L": "Beijing",
      "O": "system:kube-scheduler",
      "OU": "Kubernetes-manual"
    }
  ]
}
[root@k8s-master01 pki]# cfssl gencert \
-ca=/etc/kubernetes/pki/ca.pem \
-ca-key=/etc/kubernetes/pki/ca-key.pem \
-config=ca-config.json \
-profile=kubernetes \
scheduler-csr.json | cfssljson -bare /etc/kubernetes/pki/scheduler
#Output
2022/02/25 16:34:13 [INFO] generate received request
2022/02/25 16:34:13 [INFO] received CSR
2022/02/25 16:34:13 [INFO] generating key: rsa-2048
2022/02/25 16:34:14 [INFO] encoded CSR
2022/02/25 16:34:14 [INFO] signed certificate with serial number 28588241162948355825534653175725392424189325408
2022/02/25 16:34:14 [WARNING] This certificate lacks a "hosts" field. This makes it unsuitable for
websites. For more information see the Baseline Requirements for the Issuance and Management
of Publicly-Trusted Certificates, v.1.1.6, from the CA/Browser Forum (https://cabforum.org);
specifically, section 10.2.3 ("Information Requirements").
[root@k8s-master01 pki]# ll /etc/kubernetes/pki/scheduler*
-rw-r--r-- 1 root root 1058 Feb 25 16:34 /etc/kubernetes/pki/scheduler.csr
-rw------- 1 root root 1679 Feb 25 16:34 /etc/kubernetes/pki/scheduler-key.pem
-rw-r--r-- 1 root root 1476 Feb 25 16:34 /etc/kubernetes/pki/scheduler.pem
# Note: if this is not a highly available cluster, change 172.31.3.188:6443 to Master01's address
[root@k8s-master01 pki]# kubectl config set-cluster kubernetes \
--certificate-authority=/etc/kubernetes/pki/ca.pem \
--embed-certs=true \
--server=https://172.31.3.188:6443 \
--kubeconfig=/etc/kubernetes/scheduler.kubeconfig
#Output
Cluster "kubernetes" set.
[root@k8s-master01 pki]# kubectl config set-credentials system:kube-scheduler \
--client-certificate=/etc/kubernetes/pki/scheduler.pem \
--client-key=/etc/kubernetes/pki/scheduler-key.pem \
--embed-certs=true \
--kubeconfig=/etc/kubernetes/scheduler.kubeconfig
#Output
User "system:kube-scheduler" set.
[root@k8s-master01 pki]# kubectl config set-context system:kube-scheduler@kubernetes \
--cluster=kubernetes \
--user=system:kube-scheduler \
--kubeconfig=/etc/kubernetes/scheduler.kubeconfig
#Output
Context "system:kube-scheduler@kubernetes" created.
[root@k8s-master01 pki]# kubectl config use-context system:kube-scheduler@kubernetes \
--kubeconfig=/etc/kubernetes/scheduler.kubeconfig
#Output
Switched to context "system:kube-scheduler@kubernetes".
8.3.6 Generating the admin certificate and kubeconfig
[root@k8s-master01 pki]# cat ca-config.json
{
  "signing": {
    "default": {
      "expiry": "876000h"
    },
    "profiles": {
      "kubernetes": {
        "usages": [
          "signing",
          "key encipherment",
          "server auth",
          "client auth"
        ],
        "expiry": "876000h"
      }
    }
  }
}
[root@k8s-master01 pki]# cat admin-csr.json
{
  "CN": "admin",
  "key": {
    "algo": "rsa",
    "size": 2048
  },
  "names": [
    {
      "C": "CN",
      "ST": "Beijing",
      "L": "Beijing",
      "O": "system:masters",
      "OU": "Kubernetes-manual"
    }
  ]
}
[root@k8s-master01 pki]# cfssl gencert \
-ca=/etc/kubernetes/pki/ca.pem \
-ca-key=/etc/kubernetes/pki/ca-key.pem \
-config=ca-config.json \
-profile=kubernetes \
admin-csr.json | cfssljson -bare /etc/kubernetes/pki/admin
#Output
2022/02/25 16:37:03 [INFO] generate received request
2022/02/25 16:37:03 [INFO] received CSR
2022/02/25 16:37:03 [INFO] generating key: rsa-2048
2022/02/25 16:37:03 [INFO] encoded CSR
2022/02/25 16:37:03 [INFO] signed certificate with serial number 406554626408020347401920084654276875970659166990
2022/02/25 16:37:03 [WARNING] This certificate lacks a "hosts" field. This makes it unsuitable for
websites. For more information see the Baseline Requirements for the Issuance and Management
of Publicly-Trusted Certificates, v.1.1.6, from the CA/Browser Forum (https://cabforum.org);
specifically, section 10.2.3 ("Information Requirements").
[root@k8s-master01 pki]# ll /etc/kubernetes/pki/admin*
-rw-r--r-- 1 root root 1025 Feb 25 16:37 /etc/kubernetes/pki/admin.csr
-rw------- 1 root root 1675 Feb 25 16:37 /etc/kubernetes/pki/admin-key.pem
-rw-r--r-- 1 root root 1444 Feb 25 16:37 /etc/kubernetes/pki/admin.pem
# Note: if this is not a highly available cluster, change 172.31.3.188:6443 to Master01's address
[root@k8s-master01 pki]# kubectl config set-cluster kubernetes --certificate-authority=/etc/kubernetes/pki/ca.pem --embed-certs=true --server=https://172.31.3.188:6443 --kubeconfig=/etc/kubernetes/admin.kubeconfig
#Output
Cluster "kubernetes" set.
[root@k8s-master01 pki]# kubectl config set-credentials kubernetes-admin --client-certificate=/etc/kubernetes/pki/admin.pem --client-key=/etc/kubernetes/pki/admin-key.pem --embed-certs=true --kubeconfig=/etc/kubernetes/admin.kubeconfig
#Output
User "kubernetes-admin" set.
[root@k8s-master01 pki]# kubectl config set-context kubernetes-admin@kubernetes --cluster=kubernetes --user=kubernetes-admin --kubeconfig=/etc/kubernetes/admin.kubeconfig
#Output
Context "kubernetes-admin@kubernetes" created.
[root@k8s-master01 pki]# kubectl config use-context kubernetes-admin@kubernetes --kubeconfig=/etc/kubernetes/admin.kubeconfig
#Output
Switched to context "kubernetes-admin@kubernetes".
8.3.7 Creating the ServiceAccount key
Create the ServiceAccount key pair used to sign and verify ServiceAccount token Secrets
[root@k8s-master01 pki]# openssl genrsa -out /etc/kubernetes/pki/sa.key 2048
#Output
Generating RSA private key, 2048 bit long modulus
..............................+++
....................................+++
e is 65537 (0x10001)
[root@k8s-master01 pki]# openssl rsa -in /etc/kubernetes/pki/sa.key -pubout -out /etc/kubernetes/pki/sa.pub
#Output
writing RSA key
[root@k8s-master01 pki]# ll /etc/kubernetes/pki/sa*
-rw-r--r-- 1 root root 1675 Feb 25 16:38 /etc/kubernetes/pki/sa.key
-rw-r--r-- 1 root root 451 Feb 25 16:39 /etc/kubernetes/pki/sa.pub
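The apiserver signs ServiceAccount tokens with sa.key and verifies them with sa.pub, so the two files must be a matching pair. A quick check is to re-derive the public key and compare; empty diff output means they match:
[root@k8s-master01 pki]# openssl rsa -in /etc/kubernetes/pki/sa.key -pubout 2>/dev/null | diff - /etc/kubernetes/pki/sa.pub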
Send the certificates and kubeconfigs to the other master nodes
[root@k8s-master01 pki]# for NODE in k8s-master02 k8s-master03; do
for FILE in $(ls /etc/kubernetes/pki | grep -v etcd); do
scp /etc/kubernetes/pki/${FILE} $NODE:/etc/kubernetes/pki/${FILE};
done;
for FILE in admin.kubeconfig controller-manager.kubeconfig scheduler.kubeconfig; do
scp /etc/kubernetes/${FILE} $NODE:/etc/kubernetes/${FILE};
done;
done
Check the certificate files
[root@k8s-master01 pki]# ll -R /etc/kubernetes/pki/
/etc/kubernetes/pki/:
total 92
-rw-r--r-- 1 root root 1025 Feb 25 16:37 admin.csr
-rw------- 1 root root 1675 Feb 25 16:37 admin-key.pem
-rw-r--r-- 1 root root 1444 Feb 25 16:37 admin.pem
-rw-r--r-- 1 root root 1029 Feb 25 16:21 apiserver.csr
-rw------- 1 root root 1675 Feb 25 16:21 apiserver-key.pem
-rw-r--r-- 1 root root 1692 Feb 25 16:21 apiserver.pem
-rw-r--r-- 1 root root 1025 Feb 25 16:19 ca.csr
-rw------- 1 root root 1675 Feb 25 16:19 ca-key.pem
-rw-r--r-- 1 root root 1411 Feb 25 16:19 ca.pem
-rw-r--r-- 1 root root 1082 Feb 25 16:30 controller-manager.csr
-rw------- 1 root root 1675 Feb 25 16:30 controller-manager-key.pem
-rw-r--r-- 1 root root 1501 Feb 25 16:30 controller-manager.pem
drwxr-xr-x 2 root root 84 Feb 25 15:56 etcd
-rw-r--r-- 1 root root 891 Feb 25 16:23 front-proxy-ca.csr
-rw------- 1 root root 1679 Feb 25 16:23 front-proxy-ca-key.pem
-rw-r--r-- 1 root root 1143 Feb 25 16:23 front-proxy-ca.pem
-rw-r--r-- 1 root root 903 Feb 25 16:24 front-proxy-client.csr
-rw------- 1 root root 1675 Feb 25 16:24 front-proxy-client-key.pem
-rw-r--r-- 1 root root 1188 Feb 25 16:24 front-proxy-client.pem
-rw-r--r-- 1 root root 1675 Feb 25 16:38 sa.key
-rw-r--r-- 1 root root 451 Feb 25 16:39 sa.pub
-rw-r--r-- 1 root root 1058 Feb 25 16:34 scheduler.csr
-rw------- 1 root root 1679 Feb 25 16:34 scheduler-key.pem
-rw-r--r-- 1 root root 1476 Feb 25 16:34 scheduler.pem
/etc/kubernetes/pki/etcd:
total 0
lrwxrwxrwx 1 root root 29 Feb 25 15:56 etcd-ca-key.pem -> /etc/etcd/ssl/etcd-ca-key.pem
lrwxrwxrwx 1 root root 25 Feb 25 15:56 etcd-ca.pem -> /etc/etcd/ssl/etcd-ca.pem
lrwxrwxrwx 1 root root 26 Feb 25 15:56 etcd-key.pem -> /etc/etcd/ssl/etcd-key.pem
lrwxrwxrwx 1 root root 22 Feb 25 15:56 etcd.pem -> /etc/etcd/ssl/etcd.pem
8.4 Configuring the Kubernetes components
Create the required directories on the master nodes
[root@k8s-master01 pki]# cd
[root@k8s-master01 ~]# mkdir -p /etc/kubernetes/manifests/ /etc/systemd/system/kubelet.service.d /var/lib/kubelet /var/log/kubernetes
[root@k8s-master02 ~]# mkdir -p /etc/kubernetes/manifests/ /etc/systemd/system/kubelet.service.d /var/lib/kubelet /var/log/kubernetes
[root@k8s-master03 ~]# mkdir -p /etc/kubernetes/manifests/ /etc/systemd/system/kubelet.service.d /var/lib/kubelet /var/log/kubernetes
8.4.1 Apiserver
Create the kube-apiserver service on all master nodes. Note: if this is not a highly available cluster, change 172.31.3.188 to Master01's address.
8.4.1.1 Master01 configuration
Note: this document uses 10.96.0.0/12 as the Kubernetes service CIDR; it must not overlap with the host network or the Pod CIDR. Adjust as needed.
[root@k8s-master01 ~]# vim /lib/systemd/system/kube-apiserver.service
[Unit]
Description=Kubernetes API Server
Documentation=https://github.com/kubernetes/kubernetes
After=network.target
[Service]
ExecStart=/usr/local/bin/kube-apiserver \
--v=2 \
--logtostderr=true \
--allow-privileged=true \
--bind-address=0.0.0.0 \
--secure-port=6443 \
--insecure-port=0 \
--advertise-address=172.31.3.101 \
--service-cluster-ip-range=10.96.0.0/12 \
--service-node-port-range=30000-32767 \
--etcd-servers=https://172.31.3.108:2379,https://172.31.3.109:2379,https://172.31.3.110:2379 \
--etcd-cafile=/etc/etcd/ssl/etcd-ca.pem \
--etcd-certfile=/etc/etcd/ssl/etcd.pem \
--etcd-keyfile=/etc/etcd/ssl/etcd-key.pem \
--client-ca-file=/etc/kubernetes/pki/ca.pem \
--tls-cert-file=/etc/kubernetes/pki/apiserver.pem \
--tls-private-key-file=/etc/kubernetes/pki/apiserver-key.pem \
--kubelet-client-certificate=/etc/kubernetes/pki/apiserver.pem \
--kubelet-client-key=/etc/kubernetes/pki/apiserver-key.pem \
--service-account-key-file=/etc/kubernetes/pki/sa.pub \
--service-account-signing-key-file=/etc/kubernetes/pki/sa.key \
--service-account-issuer=https://kubernetes.default.svc.cluster.local \
--kubelet-preferred-address-types=InternalIP,ExternalIP,Hostname \
--enable-admission-plugins=NamespaceLifecycle,LimitRanger,ServiceAccount,DefaultStorageClass,DefaultTolerationSeconds,NodeRestriction,ResourceQuota \
--authorization-mode=Node,RBAC \
--enable-bootstrap-token-auth=true \
--requestheader-client-ca-file=/etc/kubernetes/pki/front-proxy-ca.pem \
--proxy-client-cert-file=/etc/kubernetes/pki/front-proxy-client.pem \
--proxy-client-key-file=/etc/kubernetes/pki/front-proxy-client-key.pem \
--requestheader-allowed-names=aggregator \
--requestheader-group-headers=X-Remote-Group \
--requestheader-extra-headers-prefix=X-Remote-Extra- \
--requestheader-username-headers=X-Remote-User
# --token-auth-file=/etc/kubernetes/token.csv
Restart=on-failure
RestartSec=10s
LimitNOFILE=65535
[Install]
WantedBy=multi-user.target
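Before enabling the service, it can be worth letting systemd lint the unit file; systemd-analyze verify flags unknown directives and broken continuation lines (a missing trailing backslash in the long ExecStart block is an easy mistake here):
[root@k8s-master01 ~]# systemd-analyze verify /lib/systemd/system/kube-apiserver.service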
8.4.1.2 Master02 configuration
Note: this document uses 10.96.0.0/12 as the Kubernetes service CIDR; it must not overlap with the host network or the Pod CIDR. Adjust as needed.
[root@k8s-master02 ~]# vim /lib/systemd/system/kube-apiserver.service
[Unit]
Description=Kubernetes API Server
Documentation=https://github.com/kubernetes/kubernetes
After=network.target
[Service]
ExecStart=/usr/local/bin/kube-apiserver \
--v=2 \
--logtostderr=true \
--allow-privileged=true \
--bind-address=0.0.0.0 \
--secure-port=6443 \
--insecure-port=0 \
--advertise-address=172.31.3.102 \
--service-cluster-ip-range=10.96.0.0/12 \
--service-node-port-range=30000-32767 \
--etcd-servers=https://172.31.3.108:2379,https://172.31.3.109:2379,https://172.31.3.110:2379 \
--etcd-cafile=/etc/etcd/ssl/etcd-ca.pem \
--etcd-certfile=/etc/etcd/ssl/etcd.pem \
--etcd-keyfile=/etc/etcd/ssl/etcd-key.pem \
--client-ca-file=/etc/kubernetes/pki/ca.pem \
--tls-cert-file=/etc/kubernetes/pki/apiserver.pem \
--tls-private-key-file=/etc/kubernetes/pki/apiserver-key.pem \
--kubelet-client-certificate=/etc/kubernetes/pki/apiserver.pem \
--kubelet-client-key=/etc/kubernetes/pki/apiserver-key.pem \
--service-account-key-file=/etc/kubernetes/pki/sa.pub \
--service-account-signing-key-file=/etc/kubernetes/pki/sa.key \
--service-account-issuer=https://kubernetes.default.svc.cluster.local \
--kubelet-preferred-address-types=InternalIP,ExternalIP,Hostname \
--enable-admission-plugins=NamespaceLifecycle,LimitRanger,ServiceAccount,DefaultStorageClass,DefaultTolerationSeconds,NodeRestriction,ResourceQuota \
--authorization-mode=Node,RBAC \
--enable-bootstrap-token-auth=true \
--requestheader-client-ca-file=/etc/kubernetes/pki/front-proxy-ca.pem \
--proxy-client-cert-file=/etc/kubernetes/pki/front-proxy-client.pem \
--proxy-client-key-file=/etc/kubernetes/pki/front-proxy-client-key.pem \
--requestheader-allowed-names=aggregator \
--requestheader-group-headers=X-Remote-Group \
--requestheader-extra-headers-prefix=X-Remote-Extra- \
--requestheader-username-headers=X-Remote-User
# --token-auth-file=/etc/kubernetes/token.csv
Restart=on-failure
RestartSec=10s
LimitNOFILE=65535
[Install]
WantedBy=multi-user.target
8.4.1.3 Master03 configuration
Note: this document uses 10.96.0.0/12 as the Kubernetes service CIDR; it must not overlap with the host network or the Pod CIDR. Adjust as needed.
[root@k8s-master03 ~]# vim /lib/systemd/system/kube-apiserver.service
[Unit]
Description=Kubernetes API Server
Documentation=https://github.com/kubernetes/kubernetes
After=network.target
[Service]
ExecStart=/usr/local/bin/kube-apiserver \
--v=2 \
--logtostderr=true \
--allow-privileged=true \
--bind-address=0.0.0.0 \
--secure-port=6443 \
--insecure-port=0 \
--advertise-address=172.31.3.103 \
--service-cluster-ip-range=10.96.0.0/12 \
--service-node-port-range=30000-32767 \
--etcd-servers=https://172.31.3.108:2379,https://172.31.3.109:2379,https://172.31.3.110:2379 \
--etcd-cafile=/etc/etcd/ssl/etcd-ca.pem \
--etcd-certfile=/etc/etcd/ssl/etcd.pem \
--etcd-keyfile=/etc/etcd/ssl/etcd-key.pem \
--client-ca-file=/etc/kubernetes/pki/ca.pem \
--tls-cert-file=/etc/kubernetes/pki/apiserver.pem \
--tls-private-key-file=/etc/kubernetes/pki/apiserver-key.pem \
--kubelet-client-certificate=/etc/kubernetes/pki/apiserver.pem \
--kubelet-client-key=/etc/kubernetes/pki/apiserver-key.pem \
--service-account-key-file=/etc/kubernetes/pki/sa.pub \
--service-account-signing-key-file=/etc/kubernetes/pki/sa.key \
--service-account-issuer=https://kubernetes.default.svc.cluster.local \
--kubelet-preferred-address-types=InternalIP,ExternalIP,Hostname \
--enable-admission-plugins=NamespaceLifecycle,LimitRanger,ServiceAccount,DefaultStorageClass,DefaultTolerationSeconds,NodeRestriction,ResourceQuota \
--authorization-mode=Node,RBAC \
--enable-bootstrap-token-auth=true \
--requestheader-client-ca-file=/etc/kubernetes/pki/front-proxy-ca.pem \
--proxy-client-cert-file=/etc/kubernetes/pki/front-proxy-client.pem \
--proxy-client-key-file=/etc/kubernetes/pki/front-proxy-client-key.pem \
--requestheader-allowed-names=aggregator \
--requestheader-group-headers=X-Remote-Group \
--requestheader-extra-headers-prefix=X-Remote-Extra- \
--requestheader-username-headers=X-Remote-User
# --token-auth-file=/etc/kubernetes/token.csv
Restart=on-failure
RestartSec=10s
LimitNOFILE=65535
[Install]
WantedBy=multi-user.target
8.4.1.4 Starting the apiserver
Enable and start kube-apiserver on all master nodes
[root@k8s-master01 ~]# systemctl daemon-reload && systemctl enable --now kube-apiserver
[root@k8s-master02 ~]# systemctl daemon-reload && systemctl enable --now kube-apiserver
[root@k8s-master03 ~]# systemctl daemon-reload && systemctl enable --now kube-apiserver
Check the kube-apiserver status
[root@k8s-master01 ~]# systemctl status kube-apiserver
● kube-apiserver.service - Kubernetes API Server
Loaded: loaded (/usr/lib/systemd/system/kube-apiserver.service; enabled; vendor preset: disabled)
Active: active (running) since Fri 2022-02-25 16:55:38 CST; 15s ago
Docs: https://github.com/kubernetes/kubernetes
Main PID: 13671 (kube-apiserver)
Tasks: 8
Memory: 302.6M
CGroup: /system.slice/kube-apiserver.service
└─13671 /usr/local/bin/kube-apiserver --v=2 --logtostderr=true --allow-privileged=true --bind-address=0.0.0.0 --secure-port=6443 ...
Feb 25 16:55:43 k8s-master01.example.local kube-apiserver[13671]: I0225 16:55:43.090985 13671 storage_rbac.go:326] created rolebindin...ystem
Feb 25 16:55:43 k8s-master01.example.local kube-apiserver[13671]: I0225 16:55:43.100954 13671 storage_rbac.go:326] created rolebindin...ystem
Feb 25 16:55:43 k8s-master01.example.local kube-apiserver[13671]: I0225 16:55:43.102507 13671 healthz.go:244] poststarthook/rbac/boot...eadyz
Feb 25 16:55:43 k8s-master01.example.local kube-apiserver[13671]: [-]poststarthook/rbac/bootstrap-roles failed: not finished
Feb 25 16:55:43 k8s-master01.example.local kube-apiserver[13671]: I0225 16:55:43.110647 13671 storage_rbac.go:326] created rolebindin...ystem
Feb 25 16:55:43 k8s-master01.example.local kube-apiserver[13671]: I0225 16:55:43.120843 13671 storage_rbac.go:326] created rolebindin...ystem
Feb 25 16:55:43 k8s-master01.example.local kube-apiserver[13671]: I0225 16:55:43.130291 13671 storage_rbac.go:326] created rolebindin...ublic
Feb 25 16:55:43 k8s-master01.example.local kube-apiserver[13671]: W0225 16:55:43.237118 13671 lease.go:233] Resetting endpoints for m....101]
Feb 25 16:55:43 k8s-master01.example.local kube-apiserver[13671]: I0225 16:55:43.238022 13671 controller.go:609] quota admission adde...oints
Feb 25 16:55:43 k8s-master01.example.local kube-apiserver[13671]: I0225 16:55:43.245826 13671 controller.go:609] quota admission adde...8s.io
Hint: Some lines were ellipsized, use -l to show in full.
[root@k8s-master02 ~]# systemctl status kube-apiserver
[root@k8s-master03 ~]# systemctl status kube-apiserver
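Kubernetes binds the system:public-info-viewer role to unauthenticated users by default, so the health endpoint can also be probed anonymously as an end-to-end check; it should simply return ok:
[root@k8s-master01 ~]# curl -k https://127.0.0.1:6443/healthz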
These messages in the system log can be ignored
[root@k8s-master01 ~]# tail -f /var/log/messages
...
Feb 25 16:56:23 k8s-master01 kube-apiserver: I0225 16:56:23.136848 13671 controlbuf.go:508] transport: loopyWriter.run returning. connection error: desc = "transport is closing"
8.4.2 ControllerManager
Create the kube-controller-manager service on all master nodes.
Note: this document uses 192.168.0.0/12 as the Pod CIDR; it must not overlap with the host network or the Kubernetes service CIDR. Adjust as needed.
[root@k8s-master01 ~]# vim /lib/systemd/system/kube-controller-manager.service
[Unit]
Description=Kubernetes Controller Manager
Documentation=https://github.com/kubernetes/kubernetes
After=network.target
[Service]
ExecStart=/usr/local/bin/kube-controller-manager \
--v=2 \
--logtostderr=true \
--address=127.0.0.1 \
--root-ca-file=/etc/kubernetes/pki/ca.pem \
--cluster-signing-cert-file=/etc/kubernetes/pki/ca.pem \
--cluster-signing-key-file=/etc/kubernetes/pki/ca-key.pem \
--service-account-private-key-file=/etc/kubernetes/pki/sa.key \
--kubeconfig=/etc/kubernetes/controller-manager.kubeconfig \
--leader-elect=true \
--use-service-account-credentials=true \
--node-monitor-grace-period=40s \
--node-monitor-period=5s \
--pod-eviction-timeout=2m0s \
--controllers=*,bootstrapsigner,tokencleaner \
--allocate-node-cidrs=true \
--cluster-cidr=192.168.0.0/12 \
--requestheader-client-ca-file=/etc/kubernetes/pki/front-proxy-ca.pem \
--node-cidr-mask-size=24
Restart=always
RestartSec=10s
[Install]
WantedBy=multi-user.target
[root@k8s-master01 ~]# for NODE in k8s-master02 k8s-master03; do scp /lib/systemd/system/kube-controller-manager.service $NODE:/lib/systemd/system/; done
Start kube-controller-manager on all master nodes
[root@k8s-master01 ~]# systemctl daemon-reload && systemctl enable --now kube-controller-manager
[root@k8s-master02 ~]# systemctl daemon-reload && systemctl enable --now kube-controller-manager
[root@k8s-master03 ~]# systemctl daemon-reload && systemctl enable --now kube-controller-manager
Check the startup status
[root@k8s-master01 ~]# systemctl status kube-controller-manager
● kube-controller-manager.service - Kubernetes Controller Manager
Loaded: loaded (/usr/lib/systemd/system/kube-controller-manager.service; enabled; vendor preset: disabled)
Active: active (running) since Fri 2022-02-25 16:58:09 CST; 16s ago
Docs: https://github.com/kubernetes/kubernetes
Main PID: 13723 (kube-controller)
Tasks: 6
Memory: 38.7M
CGroup: /system.slice/kube-controller-manager.service
└─13723 /usr/local/bin/kube-controller-manager --v=2 --logtostderr=true --address=127.0.0.1 --root-ca-file=/etc/kubernetes/pki/ca...
Feb 25 16:58:24 k8s-master01.example.local kube-controller-manager[13723]: I0225 16:58:24.294372 13723 reflector.go:219] Starting refl...:134
Feb 25 16:58:24 k8s-master01.example.local kube-controller-manager[13723]: I0225 16:58:24.294470 13723 reflector.go:219] Starting refl...:134
Feb 25 16:58:24 k8s-master01.example.local kube-controller-manager[13723]: I0225 16:58:24.294698 13723 reflector.go:219] Starting refl...:134
Feb 25 16:58:24 k8s-master01.example.local kube-controller-manager[13723]: I0225 16:58:24.294813 13723 reflector.go:219] Starting refl...:134
Feb 25 16:58:24 k8s-master01.example.local kube-controller-manager[13723]: I0225 16:58:24.294941 13723 reflector.go:219] Starting refl...o:90
Feb 25 16:58:24 k8s-master01.example.local kube-controller-manager[13723]: I0225 16:58:24.300284 13723 reflector.go:219] Starting refl...o:90
Feb 25 16:58:24 k8s-master01.example.local kube-controller-manager[13723]: I0225 16:58:24.593499 13723 shared_informer.go:247] Caches ...ctor
Feb 25 16:58:24 k8s-master01.example.local kube-controller-manager[13723]: I0225 16:58:24.593529 13723 garbagecollector.go:254] synced...ctor
Feb 25 16:58:24 k8s-master01.example.local kube-controller-manager[13723]: I0225 16:58:24.630713 13723 shared_informer.go:247] Caches ...ctor
Feb 25 16:58:24 k8s-master01.example.local kube-controller-manager[13723]: I0225 16:58:24.630766 13723 garbagecollector.go:151] Garbag...bage
Hint: Some lines were ellipsized, use -l to show in full.
[root@k8s-master02 ~]# systemctl status kube-controller-manager
[root@k8s-master03 ~]# systemctl status kube-controller-manager
8.4.3 Scheduler
Create the kube-scheduler service on all master nodes
[root@k8s-master01 ~]# vim /lib/systemd/system/kube-scheduler.service
[Unit]
Description=Kubernetes Scheduler
Documentation=https://github.com/kubernetes/kubernetes
After=network.target
[Service]
ExecStart=/usr/local/bin/kube-scheduler \
--v=2 \
--logtostderr=true \
--address=127.0.0.1 \
--leader-elect=true \
--kubeconfig=/etc/kubernetes/scheduler.kubeconfig
Restart=always
RestartSec=10s
[Install]
WantedBy=multi-user.target
[root@k8s-master01 ~]# for NODE in k8s-master02 k8s-master03; do scp /lib/systemd/system/kube-scheduler.service $NODE:/lib/systemd/system/; done
[root@k8s-master01 ~]# systemctl daemon-reload && systemctl enable --now kube-scheduler
[root@k8s-master02 ~]# systemctl daemon-reload && systemctl enable --now kube-scheduler
[root@k8s-master03 ~]# systemctl daemon-reload && systemctl enable --now kube-scheduler
[root@k8s-master01 ~]# systemctl status kube-scheduler
● kube-scheduler.service - Kubernetes Scheduler
Loaded: loaded (/usr/lib/systemd/system/kube-scheduler.service; enabled; vendor preset: disabled)
Active: active (running) since Fri 2022-02-25 16:59:50 CST; 14s ago
Docs: https://github.com/kubernetes/kubernetes
Main PID: 13770 (kube-scheduler)
Tasks: 7
Memory: 18.1M
CGroup: /system.slice/kube-scheduler.service
└─13770 /usr/local/bin/kube-scheduler --v=2 --logtostderr=true --address=127.0.0.1 --leader-elect=true --kubeconfig=/etc/kubernet...
Feb 25 16:59:52 k8s-master01.example.local kube-scheduler[13770]: I0225 16:59:52.030124 13770 reflector.go:219] Starting reflector *v...o:134
Feb 25 16:59:52 k8s-master01.example.local kube-scheduler[13770]: I0225 16:59:52.030359 13770 reflector.go:219] Starting reflector *v...o:134
Feb 25 16:59:52 k8s-master01.example.local kube-scheduler[13770]: I0225 16:59:52.030534 13770 reflector.go:219] Starting reflector *v...o:134
Feb 25 16:59:52 k8s-master01.example.local kube-scheduler[13770]: I0225 16:59:52.030779 13770 reflector.go:219] Starting reflector *v...o:134
Feb 25 16:59:52 k8s-master01.example.local kube-scheduler[13770]: I0225 16:59:52.031157 13770 reflector.go:219] Starting reflector *v...o:134
Feb 25 16:59:52 k8s-master01.example.local kube-scheduler[13770]: I0225 16:59:52.031425 13770 reflector.go:219] Starting reflector *v...o:134
Feb 25 16:59:52 k8s-master01.example.local kube-scheduler[13770]: I0225 16:59:52.031717 13770 reflector.go:219] Starting reflector *v...o:134
Feb 25 16:59:52 k8s-master01.example.local kube-scheduler[13770]: I0225 16:59:52.032718 13770 reflector.go:219] Starting reflector *v...o:134
Feb 25 16:59:52 k8s-master01.example.local kube-scheduler[13770]: I0225 16:59:52.128803 13770 leaderelection.go:243] attempting to ac...er...
Feb 25 16:59:52 k8s-master01.example.local kube-scheduler[13770]: I0225 16:59:52.153623 13770 leaderelection.go:253] successfully acq...duler
Hint: Some lines were ellipsized, use -l to show in full.
[root@k8s-master02 ~]# systemctl status kube-scheduler
[root@k8s-master03 ~]# systemctl status kube-scheduler
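With all three masters running, leader election should have settled on one active controller-manager and one active scheduler; on these versions both coordinate through Lease objects by default, so the current holders can be read from kube-system (using the admin kubeconfig generated earlier):
[root@k8s-master01 ~]# kubectl --kubeconfig=/etc/kubernetes/admin.kubeconfig -n kube-system get leases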
8.4.4 TLS Bootstrapping configuration
Create the bootstrap resources on Master01
[root@k8s-master01 ~]# vim bootstrap.secret.yaml
apiVersion: v1
kind: Secret
metadata:
  name: bootstrap-token-c8ad9c
  namespace: kube-system
type: bootstrap.kubernetes.io/token
stringData:
  description: "The default bootstrap token generated by 'kubelet '."
  token-id: c8ad9c
  token-secret: 2e4d610cf3e7426e
  usage-bootstrap-authentication: "true"
  usage-bootstrap-signing: "true"
  auth-extra-groups: system:bootstrappers:default-node-token,system:bootstrappers:worker,system:bootstrappers:ingress
---
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRoleBinding
metadata:
  name: kubelet-bootstrap
roleRef:
  apiGroup: rbac.authorization.k8s.io
  kind: ClusterRole
  name: system:node-bootstrapper
subjects:
- apiGroup: rbac.authorization.k8s.io
  kind: Group
  name: system:bootstrappers:default-node-token
---
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRoleBinding
metadata:
  name: node-autoapprove-bootstrap
roleRef:
  apiGroup: rbac.authorization.k8s.io
  kind: ClusterRole
  name: system:certificates.k8s.io:certificatesigningrequests:nodeclient
subjects:
- apiGroup: rbac.authorization.k8s.io
  kind: Group
  name: system:bootstrappers:default-node-token
---
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRoleBinding
metadata:
  name: node-autoapprove-certificate-rotation
roleRef:
  apiGroup: rbac.authorization.k8s.io
  kind: ClusterRole
  name: system:certificates.k8s.io:certificatesigningrequests:selfnodeclient
subjects:
- apiGroup: rbac.authorization.k8s.io
  kind: Group
  name: system:nodes
---
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRole
metadata:
  annotations:
    rbac.authorization.kubernetes.io/autoupdate: "true"
  labels:
    kubernetes.io/bootstrapping: rbac-defaults
  name: system:kube-apiserver-to-kubelet
rules:
  - apiGroups:
      - ""
    resources:
      - nodes/proxy
      - nodes/stats
      - nodes/log
      - nodes/spec
      - nodes/metrics
    verbs:
      - "*"
---
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRoleBinding
metadata:
  name: system:kube-apiserver
  namespace: ""
roleRef:
  apiGroup: rbac.authorization.k8s.io
  kind: ClusterRole
  name: system:kube-apiserver-to-kubelet
subjects:
  - apiGroup: rbac.authorization.k8s.io
    kind: User
    name: kube-apiserver
# Note: if this is not a highly available cluster, change 172.31.3.188:6443 to Master01's address
[root@k8s-master01 ~]# kubectl config set-cluster kubernetes --certificate-authority=/etc/kubernetes/pki/ca.pem --embed-certs=true --server=https://172.31.3.188:6443 --kubeconfig=/etc/kubernetes/bootstrap-kubelet.kubeconfig
#Output
Cluster "kubernetes" set.
[root@k8s-master01 ~]# kubectl config set-credentials tls-bootstrap-token-user --token=c8ad9c.2e4d610cf3e7426e --kubeconfig=/etc/kubernetes/bootstrap-kubelet.kubeconfig
#Output
User "tls-bootstrap-token-user" set.
[root@k8s-master01 ~]# kubectl config set-context tls-bootstrap-token-user@kubernetes --cluster=kubernetes --user=tls-bootstrap-token-user --kubeconfig=/etc/kubernetes/bootstrap-kubelet.kubeconfig
#Output
Context "tls-bootstrap-token-user@kubernetes" modified.
[root@k8s-master01 ~]# kubectl config use-context tls-bootstrap-token-user@kubernetes --kubeconfig=/etc/kubernetes/bootstrap-kubelet.kubeconfig
#Output
Switched to context "tls-bootstrap-token-user@kubernetes".
Note: if you change the token-id and token-secret in bootstrap.secret.yaml, the two values must keep the same lengths (6 and 16 characters), and the token passed to set-credentials above (c8ad9c.2e4d610cf3e7426e, i.e. <token-id>.<token-secret>) must be updated to match.
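If you prefer fresh values over the defaults, a sketch using openssl to generate a well-formed pair is shown below; token-id must be 6 characters and token-secret 16, which openssl rand -hex 3 and -hex 8 produce:
[root@k8s-master01 ~]# TOKEN_ID=$(openssl rand -hex 3)
[root@k8s-master01 ~]# TOKEN_SECRET=$(openssl rand -hex 8)
[root@k8s-master01 ~]# echo ${TOKEN_ID}.${TOKEN_SECRET}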
[root@k8s-master01 ~]# mkdir -p /root/.kube ; cp /etc/kubernetes/admin.kubeconfig /root/.kube/config
[root@k8s-master01 ~]# kubectl create -f bootstrap.secret.yaml
secret/bootstrap-token-c8ad9c created
clusterrolebinding.rbac.authorization.k8s.io/kubelet-bootstrap created
clusterrolebinding.rbac.authorization.k8s.io/node-autoapprove-bootstrap created
clusterrolebinding.rbac.authorization.k8s.io/node-autoapprove-certificate-rotation created
clusterrole.rbac.authorization.k8s.io/system:kube-apiserver-to-kubelet created
clusterrolebinding.rbac.authorization.k8s.io/system:kube-apiserver created
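As a quick check that the bootstrap pieces landed, the token Secret should now exist in kube-system:
[root@k8s-master01 ~]# kubectl -n kube-system get secret bootstrap-token-c8ad9c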
[root@k8s-master01 ~]# for NODE in k8s-master02 k8s-master03; do
for FILE in bootstrap-kubelet.kubeconfig; do
scp /etc/kubernetes/$FILE $NODE:/etc/kubernetes/${FILE}
done
done
8.4.5 Configuring kubelet
Create the kubelet service on the master nodes
[root@k8s-master01 ~]# vim /lib/systemd/system/kubelet.service
[Unit]
Description=Kubernetes Kubelet
Documentation=https://github.com/kubernetes/kubernetes
After=docker.service
Requires=docker.service
[Service]
ExecStart=/usr/local/bin/kubelet
Restart=always
StartLimitInterval=0
RestartSec=10
[Install]
WantedBy=multi-user.target
[root@k8s-master01 ~]# for NODE in k8s-master02 k8s-master03; do scp /lib/systemd/system/kubelet.service $NODE:/lib/systemd/system/ ;done
Download the pause image and push it to Harbor:
[root@k8s-master01 ~]# docker login harbor.raymonds.cc
Username: admin
Password:
WARNING! Your password will be stored unencrypted in /root/.docker/config.json.
Configure a credential helper to remove this warning. See
https://docs.docker.com/engine/reference/commandline/login/#credentials-store
Login Succeeded
[root@k8s-master01 ~]# cat download_pause_images_3.4.1.sh
#!/bin/bash
#
#**********************************************************************************************
#Author: Raymond
#QQ: 88563128
#Date: 2022-01-11
#FileName: download_pause_images.sh
#URL: raymond.blog.csdn.net
#Description: The test script
#Copyright (C): 2022 All rights reserved
#*********************************************************************************************
COLOR="echo -e \\033[01;31m"
END='\033[0m'
PAUSE_VERSION=3.4.1
HARBOR_DOMAIN=harbor.raymonds.cc
images_download(){
${COLOR}"开始下载Pause镜像"${END}
docker pull registry.aliyuncs.com/google_containers/pause:${PAUSE_VERSION}
docker tag registry.aliyuncs.com/google_containers/pause:${PAUSE_VERSION} ${HARBOR_DOMAIN}/google_containers/pause:${PAUSE_VERSION}
docker rmi registry.aliyuncs.com/google_containers/pause:${PAUSE_VERSION}
docker push ${HARBOR_DOMAIN}/google_containers/pause:${PAUSE_VERSION}
${COLOR}"Pause镜像下载完成"${END}
}
images_download
[root@k8s-master01 ~]# bash download_pause_images_3.4.1.sh
[root@k8s-master01 ~]# docker images |grep pause
harbor.raymonds.cc/google_containers/pause 3.4.1 80d28bedfe5d 23 months ago 683kB
Create the kubelet service drop-in file on the master nodes
[root@k8s-master01 ~]# vim /etc/systemd/system/kubelet.service.d/10-kubelet.conf
[Service]
Environment="KUBELET_KUBECONFIG_ARGS=--bootstrap-kubeconfig=/etc/kubernetes/bootstrap-kubelet.kubeconfig --kubeconfig=/etc/kubernetes/kubelet.kubeconfig"
Environment="KUBELET_SYSTEM_ARGS=--network-plugin=cni --cni-conf-dir=/etc/cni/net.d --cni-bin-dir=/opt/cni/bin"
Environment="KUBELET_CONFIG_ARGS=--config=/etc/kubernetes/kubelet-conf.yml --pod-infra-container-image=harbor.raymonds.cc/google_containers/pause:3.4.1" #把harbor仓库改成自己的私有仓库地址
Environment="KUBELET_EXTRA_ARGS=--node-labels=node.kubernetes.io/node='' "
ExecStart=
ExecStart=/usr/local/bin/kubelet $KUBELET_KUBECONFIG_ARGS $KUBELET_CONFIG_ARGS $KUBELET_SYSTEM_ARGS $KUBELET_EXTRA_ARGS
[root@k8s-master01 ~]# for NODE in k8s-master02 k8s-master03; do scp /etc/systemd/system/kubelet.service.d/10-kubelet.conf $NODE:/etc/systemd/system/kubelet.service.d/ ;done
Create the kubelet configuration file on the master nodes
Note: if you changed the Kubernetes service CIDR, update the clusterDNS setting in kubelet-conf.yml to the tenth address of the service CIDR, e.g. 10.96.0.10
[root@k8s-master01 ~]# vim /etc/kubernetes/kubelet-conf.yml
apiVersion: kubelet.config.k8s.io/v1beta1
kind: KubeletConfiguration
address: 0.0.0.0
port: 10250
readOnlyPort: 10255
authentication:
  anonymous:
    enabled: false
  webhook:
    cacheTTL: 2m0s
    enabled: true
  x509:
    clientCAFile: /etc/kubernetes/pki/ca.pem
authorization:
  mode: Webhook
  webhook:
    cacheAuthorizedTTL: 5m0s
    cacheUnauthorizedTTL: 30s
cgroupDriver: systemd
cgroupsPerQOS: true
clusterDNS:
- 10.96.0.10
clusterDomain: cluster.local
containerLogMaxFiles: 5
containerLogMaxSize: 10Mi
contentType: application/vnd.kubernetes.protobuf
cpuCFSQuota: true
cpuManagerPolicy: none
cpuManagerReconcilePeriod: 10s
enableControllerAttachDetach: true
enableDebuggingHandlers: true
enforceNodeAllocatable:
- pods
eventBurst: 10
eventRecordQPS: 5
evictionHard:
  imagefs.available: 15%
  memory.available: 100Mi
  nodefs.available: 10%
  nodefs.inodesFree: 5%
evictionPressureTransitionPeriod: 5m0s
failSwapOn: true
fileCheckFrequency: 20s
hairpinMode: promiscuous-bridge
healthzBindAddress: 127.0.0.1
healthzPort: 10248
httpCheckFrequency: 20s
imageGCHighThresholdPercent: 85
imageGCLowThresholdPercent: 80
imageMinimumGCAge: 2m0s
iptablesDropBit: 15
iptablesMasqueradeBit: 14
kubeAPIBurst: 10
kubeAPIQPS: 5
makeIPTablesUtilChains: true
maxOpenFiles: 1000000
maxPods: 110
nodeStatusUpdateFrequency: 10s
oomScoreAdj: -999
podPidsLimit: -1
registryBurst: 10
registryPullQPS: 5
resolvConf: /etc/resolv.conf
rotateCertificates: true
runtimeRequestTimeout: 2m0s
serializeImagePulls: true
staticPodPath: /etc/kubernetes/manifests
streamingConnectionIdleTimeout: 4h0m0s
syncFrequency: 1m0s
volumeStatsAggPeriod: 1m0s
[root@k8s-master01 ~]# for NODE in k8s-master02 k8s-master03; do scp /etc/kubernetes/kubelet-conf.yml $NODE:/etc/kubernetes/ ;done
Start kubelet on the master nodes
[root@k8s-master01 ~]# systemctl daemon-reload && systemctl enable --now kubelet
[root@k8s-master02 ~]# systemctl daemon-reload && systemctl enable --now kubelet
[root@k8s-master03 ~]# systemctl daemon-reload && systemctl enable --now kubelet
[root@k8s-master01 ~]# systemctl status kubelet
● kubelet.service - Kubernetes Kubelet
Loaded: loaded (/usr/lib/systemd/system/kubelet.service; enabled; vendor preset: disabled)
Drop-In: /etc/systemd/system/kubelet.service.d
└─10-kubelet.conf
Active: active (running) since Fri 2022-02-25 17:14:21 CST; 19s ago
Docs: https://github.com/kubernetes/kubernetes
Main PID: 13943 (kubelet)
Tasks: 13
Memory: 39.9M
CGroup: /system.slice/kubelet.service
└─13943 /usr/local/bin/kubelet --bootstrap-kubeconfig=/etc/kubernetes/bootstrap-kubelet.kubeconfig --kubeconfig=/etc/kubernetes/k...
Feb 25 17:14:34 k8s-master01.example.local kubelet[13943]: E0225 17:14:34.089479 13943 kubelet.go:2263] node "k8s-master01.example.l... found
Feb 25 17:14:34 k8s-master01.example.local kubelet[13943]: E0225 17:14:34.190545 13943 kubelet.go:2263] node "k8s-master01.example.l... found
Feb 25 17:14:34 k8s-master01.example.local kubelet[13943]: E0225 17:14:34.290854 13943 kubelet.go:2263] node "k8s-master01.example.l... found
Feb 25 17:14:34 k8s-master01.example.local kubelet[13943]: I0225 17:14:34.367572 13943 kubelet_node_status.go:74] Successfully regis....local
Feb 25 17:14:34 k8s-master01.example.local kubelet[13943]: I0225 17:14:34.391818 13943 kuberuntime_manager.go:1006] updating runtime...0.0/24
Feb 25 17:14:34 k8s-master01.example.local kubelet[13943]: I0225 17:14:34.392004 13943 docker_service.go:362] docker cri received ru...24,},}
Feb 25 17:14:34 k8s-master01.example.local kubelet[13943]: I0225 17:14:34.392109 13943 kubelet_network.go:77] Setting Pod CIDR: -> ...0.0/24
Feb 25 17:14:34 k8s-master01.example.local kubelet[13943]: E0225 17:14:34.400995 13943 kubelet.go:2183] Container runtime network no...alized
Feb 25 17:14:36 k8s-master01.example.local kubelet[13943]: W0225 17:14:36.348085 13943 cni.go:239] Unable to update cni config: no n.../net.d
Feb 25 17:14:37 k8s-master01.example.local kubelet[13943]: E0225 17:14:37.720283 13943 kubelet.go:2183] Container runtime network no...alized
Hint: Some lines were ellipsized, use -l to show in full.
[root@k8s-master02 ~]# systemctl status kubelet
[root@k8s-master03 ~]# systemctl status kubelet
At this point check the system log /var/log/messages
#it is normal if only messages like the following appear
Feb 25 17:16:16 k8s-master01 kubelet: W0225 17:16:16.358217 13943 cni.go:239] Unable to update cni config: no networks found in /etc/cni/net.d
Check the cluster status. The nodes show NotReady because no CNI network plugin has been deployed yet, which is expected at this stage.
[root@k8s-master01 ~]# kubectl get nodes
NAME STATUS ROLES AGE VERSION
k8s-master01.example.local NotReady <none> 13m v1.20.14
k8s-master02.example.local NotReady <none> 73s v1.20.14
k8s-master03.example.local NotReady <none> 70s v1.20.14
8.4.6 Configuring kube-proxy
# Note: if this is not a highly available cluster, change 172.31.3.188:6443 to Master01's address
[root@k8s-master01 ~]# kubectl -n kube-system create serviceaccount kube-proxy
#Output
serviceaccount/kube-proxy created
[root@k8s-master01 ~]# kubectl create clusterrolebinding system:kube-proxy --clusterrole system:node-proxier --serviceaccount kube-system:kube-proxy
#Output
clusterrolebinding.rbac.authorization.k8s.io/system:kube-proxy created
[root@k8s-master01 ~]# SECRET=$(kubectl -n kube-system get sa/kube-proxy \
--output=jsonpath='{.secrets[0].name}')
[root@k8s-master01 ~]# JWT_TOKEN=$(kubectl -n kube-system get secret/$SECRET \
--output=jsonpath='{.data.token}' | base64 -d)
[root@k8s-master01 ~]# PKI_DIR=/etc/kubernetes/pki
[root@k8s-master01 ~]# K8S_DIR=/etc/kubernetes
[root@k8s-master01 ~]# kubectl config set-cluster kubernetes --certificate-authority=/etc/kubernetes/pki/ca.pem --embed-certs=true --server=https://172.31.3.188:6443 --kubeconfig=${K8S_DIR}/kube-proxy.kubeconfig
#Output
Cluster "kubernetes" set.
[root@k8s-master01 ~]# kubectl config set-credentials kubernetes --token=${JWT_TOKEN} --kubeconfig=/etc/kubernetes/kube-proxy.kubeconfig
#Output
User "kubernetes" set.
[root@k8s-master01 ~]# kubectl config set-context kubernetes --cluster=kubernetes --user=kubernetes --kubeconfig=/etc/kubernetes/kube-proxy.kubeconfig
#Output
Context "kubernetes" created.
[root@k8s-master01 ~]# kubectl config use-context kubernetes --kubeconfig=/etc/kubernetes/kube-proxy.kubeconfig
#Output
Switched to context "kubernetes".
Send the kube-proxy systemd service file from master01 to the other nodes
If you changed the cluster Pod CIDR, update the clusterCIDR: 192.168.0.0/12 parameter in kube-proxy.conf to your Pod CIDR.
[root@k8s-master01 ~]# vim /etc/kubernetes/kube-proxy.conf
apiVersion: kubeproxy.config.k8s.io/v1alpha1
bindAddress: 0.0.0.0
clientConnection:
  acceptContentTypes: ""
  burst: 10
  contentType: application/vnd.kubernetes.protobuf
  kubeconfig: /etc/kubernetes/kube-proxy.kubeconfig
  qps: 5
clusterCIDR: 192.168.0.0/12 # change to your Pod CIDR
configSyncPeriod: 15m0s
conntrack:
  max: null
  maxPerCore: 32768
  min: 131072
  tcpCloseWaitTimeout: 1h0m0s
  tcpEstablishedTimeout: 24h0m0s
enableProfiling: false
healthzBindAddress: 0.0.0.0:10256
hostnameOverride: ""
iptables:
  masqueradeAll: false
  masqueradeBit: 14
  minSyncPeriod: 0s
  syncPeriod: 30s
ipvs:
  masqueradeAll: true
  minSyncPeriod: 5s
  scheduler: "rr"
  syncPeriod: 30s
kind: KubeProxyConfiguration
metricsBindAddress: 127.0.0.1:10249
mode: "ipvs"
nodePortAddresses: null
oomScoreAdj: -999
portRange: ""
udpIdleTimeout: 250ms
[root@k8s-master01 ~]# vim /lib/systemd/system/kube-proxy.service
[Unit]
Description=Kubernetes Kube Proxy
Documentation=https://github.com/kubernetes/kubernetes
After=network.target
[Service]
ExecStart=/usr/local/bin/kube-proxy \
--config=/etc/kubernetes/kube-proxy.conf \
--v=2
Restart=always
RestartSec=10s
[Install]
WantedBy=multi-user.target
[root@k8s-master01 ~]# for NODE in k8s-master02 k8s-master03; do
scp ${K8S_DIR}/kube-proxy.kubeconfig $NODE:/etc/kubernetes/kube-proxy.kubeconfig
scp /etc/kubernetes/kube-proxy.conf $NODE:/etc/kubernetes/kube-proxy.conf
scp /lib/systemd/system/kube-proxy.service $NODE:/lib/systemd/system/kube-proxy.service
done
Start kube-proxy on the master nodes
[root@k8s-master01 ~]# systemctl daemon-reload && systemctl enable --now kube-proxy
[root@k8s-master02 ~]# systemctl daemon-reload && systemctl enable --now kube-proxy
[root@k8s-master03 ~]# systemctl daemon-reload && systemctl enable --now kube-proxy
[root@k8s-master01 ~]# systemctl status kube-proxy
● kube-proxy.service - Kubernetes Kube Proxy
Loaded: loaded (/usr/lib/systemd/system/kube-proxy.service; enabled; vendor preset: disabled)
Active: active (running) since Fri 2022-02-25 18:05:42 CST; 14s ago
Docs: https://github.com/kubernetes/kubernetes
Main PID: 24062 (kube-proxy)
Tasks: 6
Memory: 16.3M
CGroup: /system.slice/kube-proxy.service
└─24062 /usr/local/bin/kube-proxy --config=/etc/kubernetes/kube-proxy.conf --v=2
Feb 25 18:05:42 k8s-master01.example.local kube-proxy[24062]: I0225 18:05:42.674150 24062 shared_informer.go:240] Waiting for caches...config
Feb 25 18:05:42 k8s-master01.example.local kube-proxy[24062]: I0225 18:05:42.674161 24062 config.go:224] Starting endpoint slice con...roller
Feb 25 18:05:42 k8s-master01.example.local kube-proxy[24062]: I0225 18:05:42.674163 24062 shared_informer.go:240] Waiting for caches...config
Feb 25 18:05:42 k8s-master01.example.local kube-proxy[24062]: I0225 18:05:42.674291 24062 reflector.go:219] Starting reflector *v1.S...go:134
Feb 25 18:05:42 k8s-master01.example.local kube-proxy[24062]: I0225 18:05:42.674706 24062 reflector.go:219] Starting reflector *v1be...go:134
Feb 25 18:05:42 k8s-master01.example.local kube-proxy[24062]: I0225 18:05:42.678410 24062 service.go:275] Service default/kubernetes... ports
Feb 25 18:05:42 k8s-master01.example.local kube-proxy[24062]: I0225 18:05:42.774399 24062 shared_informer.go:247] Caches are synced ...config
Feb 25 18:05:42 k8s-master01.example.local kube-proxy[24062]: I0225 18:05:42.774481 24062 proxier.go:1036] Not syncing ipvs rules un...master
Feb 25 18:05:42 k8s-master01.example.local kube-proxy[24062]: I0225 18:05:42.774491 24062 shared_informer.go:247] Caches are synced ...config
Feb 25 18:05:42 k8s-master01.example.local kube-proxy[24062]: I0225 18:05:42.774789 24062 service.go:390] Adding new service port "d...43/TCP
Hint: Some lines were ellipsized, use -l to show in full.
[root@k8s-master02 ~]# systemctl status kube-proxy
[root@k8s-master03 ~]# systemctl status kube-proxy
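kube-proxy is running in ipvs mode here, so (assuming the ipvsadm tool is installed) the virtual server table should already contain an entry for the kubernetes service, 10.96.0.1:443, using the rr scheduler configured above:
[root@k8s-master01 ~]# ipvsadm -Ln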
9. Deploying the nodes
9.1 Installing the node components
Send the components to the node machines
[root@k8s-master01 ~]# for NODE in k8s-node01 k8s-node02 k8s-node03; do echo $NODE; scp -o StrictHostKeyChecking=no /usr/local/bin/kube{let,-proxy} $NODE:/usr/local/bin/ ; done
Create the /opt/cni/bin directory on the node machines
[root@k8s-node01 ~]# mkdir -p /opt/cni/bin
[root@k8s-node02 ~]# mkdir -p /opt/cni/bin
[root@k8s-node03 ~]# mkdir -p /opt/cni/bin
9.2 Copying the etcd certificates
Create the etcd certificate directory on the node machines
[root@k8s-node01 ~]# mkdir /etc/etcd/ssl -p
[root@k8s-node02 ~]# mkdir /etc/etcd/ssl -p
[root@k8s-node03 ~]# mkdir /etc/etcd/ssl -p
Copy the etcd certificates to the node machines
[root@k8s-etcd01 pki]# for NODE in k8s-node01 k8s-node02 k8s-node03; do
ssh -o StrictHostKeyChecking=no $NODE "mkdir -p /etc/etcd/ssl"
for FILE in etcd-ca-key.pem etcd-ca.pem etcd-key.pem etcd.pem; do
scp -o StrictHostKeyChecking=no /etc/etcd/ssl/${FILE} $NODE:/etc/etcd/ssl/${FILE}
done
done
Create the etcd certificate directory under the Kubernetes PKI path on all node machines
[root@k8s-node01 ~]# mkdir /etc/kubernetes/pki/etcd -p
[root@k8s-node02 ~]# mkdir /etc/kubernetes/pki/etcd -p
[root@k8s-node03 ~]# mkdir /etc/kubernetes/pki/etcd -p
[root@k8s-node01 ~]# ln -s /etc/etcd/ssl/* /etc/kubernetes/pki/etcd/
[root@k8s-node02 ~]# ln -s /etc/etcd/ssl/* /etc/kubernetes/pki/etcd/
[root@k8s-node03 ~]# ln -s /etc/etcd/ssl/* /etc/kubernetes/pki/etcd/
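The symlinks let the Kubernetes components reference the etcd certificates under their own PKI tree without duplicating the files. You can confirm they resolve correctly (optional check):
[root@k8s-node01 ~]# ls -l /etc/kubernetes/pki/etcd/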
9.3 Copy the Kubernetes certificates and configuration files
Create the Kubernetes directories on the nodes
[root@k8s-node01 ~]# mkdir -p /etc/kubernetes/pki
[root@k8s-node02 ~]# mkdir -p /etc/kubernetes/pki
[root@k8s-node03 ~]# mkdir -p /etc/kubernetes/pki
Copy the certificates from master01 to the nodes
[root@k8s-master01 ~]# for NODE in k8s-node01 k8s-node02 k8s-node03; do
for FILE in pki/ca.pem pki/ca-key.pem pki/front-proxy-ca.pem bootstrap-kubelet.kubeconfig; do
scp /etc/kubernetes/$FILE $NODE:/etc/kubernetes/${FILE}
done
done
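To confirm the CA material and bootstrap kubeconfig arrived on every node (optional check):
[root@k8s-master01 ~]# for NODE in k8s-node01 k8s-node02 k8s-node03; do
ssh $NODE "ls /etc/kubernetes/pki/ca.pem /etc/kubernetes/pki/ca-key.pem /etc/kubernetes/pki/front-proxy-ca.pem /etc/kubernetes/bootstrap-kubelet.kubeconfig"
done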
9.4 Configure kubelet
Create the required directories on the nodes
[root@k8s-node01 ~]# mkdir -p /etc/kubernetes/manifests/ /etc/systemd/system/kubelet.service.d /var/lib/kubelet /var/log/kubernetes
[root@k8s-node02 ~]# mkdir -p /etc/kubernetes/manifests/ /etc/systemd/system/kubelet.service.d /var/lib/kubelet /var/log/kubernetes
[root@k8s-node03 ~]# mkdir -p /etc/kubernetes/manifests/ /etc/systemd/system/kubelet.service.d /var/lib/kubelet /var/log/kubernetes
Copy the kubelet service unit file from master01 to the nodes
[root@k8s-master01 ~]# for NODE in k8s-node01 k8s-node02 k8s-node03; do scp /lib/systemd/system/kubelet.service $NODE:/lib/systemd/system/ ;done
Copy the kubelet service drop-in configuration file from master01 to the nodes
[root@k8s-master01 ~]# for NODE in k8s-node01 k8s-node02 k8s-node03; do scp /etc/systemd/system/kubelet.service.d/10-kubelet.conf $NODE:/etc/systemd/system/kubelet.service.d/ ;done
Copy the kubelet configuration file from master01 to the nodes
[root@k8s-master01 ~]# for NODE in k8s-node01 k8s-node02 k8s-node03; do scp /etc/kubernetes/kubelet-conf.yml $NODE:/etc/kubernetes/ ;done
Start kubelet on the nodes
[root@k8s-node01 ~]# systemctl daemon-reload && systemctl enable --now kubelet
[root@k8s-node02 ~]# systemctl daemon-reload && systemctl enable --now kubelet
[root@k8s-node03 ~]# systemctl daemon-reload && systemctl enable --now kubelet
[root@k8s-node01 ~]# systemctl status kubelet
● kubelet.service - Kubernetes Kubelet
Loaded: loaded (/usr/lib/systemd/system/kubelet.service; enabled; vendor preset: disabled)
Drop-In: /etc/systemd/system/kubelet.service.d
└─10-kubelet.conf
Active: active (running) since Fri 2022-02-25 17:40:22 CST; 10s ago
Docs: https://github.com/kubernetes/kubernetes
Main PID: 13450 (kubelet)
Tasks: 12
Memory: 38.2M
CGroup: /system.slice/kubelet.service
└─13450 /usr/local/bin/kubelet --bootstrap-kubeconfig=/etc/kubernetes/bootstrap-kubelet.kubeconfig --kubeconfig=/etc/kubernetes/k...
Feb 25 17:40:30 k8s-node01.example.local kubelet[13450]: I0225 17:40:30.011461 13450 cpu_manager.go:194] [cpumanager] reconciling every 10s
Feb 25 17:40:30 k8s-node01.example.local kubelet[13450]: I0225 17:40:30.011478 13450 state_mem.go:36] [cpumanager] initializing new ... store
Feb 25 17:40:30 k8s-node01.example.local kubelet[13450]: I0225 17:40:30.014239 13450 policy_none.go:43] [cpumanager] none policy: Start
Feb 25 17:40:30 k8s-node01.example.local kubelet[13450]: W0225 17:40:30.052409 13450 manager.go:595] Failed to retrieve checkpoint f... found
Feb 25 17:40:30 k8s-node01.example.local kubelet[13450]: I0225 17:40:30.054679 13450 plugin_manager.go:114] Starting Kubelet Plugin Manager
Feb 25 17:40:30 k8s-node01.example.local kubelet[13450]: I0225 17:40:30.076391 13450 kuberuntime_manager.go:1006] updating runtime c...4.0/24
Feb 25 17:40:30 k8s-node01.example.local kubelet[13450]: I0225 17:40:30.076643 13450 docker_service.go:362] docker cri received runt...24,},}
Feb 25 17:40:30 k8s-node01.example.local kubelet[13450]: I0225 17:40:30.076779 13450 kubelet_network.go:77] Setting Pod CIDR: -> 19...4.0/24
Feb 25 17:40:30 k8s-node01.example.local kubelet[13450]: E0225 17:40:30.083546 13450 kubelet.go:2183] Container runtime network not ...alized
Feb 25 17:40:30 k8s-node01.example.local kubelet[13450]: I0225 17:40:30.182896 13450 reconciler.go:157] Reconciler: start to sync state
Hint: Some lines were ellipsized, use -l to show in full.
[root@k8s-node02 ~]# systemctl status kubelet
[root@k8s-node03 ~]# systemctl status kubelet
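Besides systemctl status, each kubelet serves a local healthz endpoint (default 127.0.0.1:10248; this assumes the port was not changed in kubelet-conf.yml). A healthy kubelet returns ok:
[root@k8s-node01 ~]# curl http://127.0.0.1:10248/healthz; echo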
Check the cluster status
[root@k8s-master01 ~]# kubectl get nodes
NAME STATUS ROLES AGE VERSION
k8s-master01.example.local NotReady <none> 2m3s v1.21.8
k8s-master02.example.local NotReady <none> 2m v1.21.8
k8s-master03.example.local NotReady <none> 2m v1.21.8
k8s-node01.example.local NotReady <none> 16s v1.21.8
k8s-node02.example.local NotReady <none> 16s v1.21.8
k8s-node03.example.local NotReady <none> 16s v1.21.8
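All nodes report NotReady at this point because no CNI network plugin has been deployed yet; the kubelet log above already hinted at this ("Container runtime network not initialized"). The status changes to Ready once the network plugin is installed in a later step. To confirm the cause on any node (optional check):
[root@k8s-master01 ~]# kubectl describe node k8s-node01.example.local | grep -i network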
9.5 Configure kube-proxy
Copy the kube-proxy files from master01 to the nodes
[root@k8s-master01 ~]# for NODE in k8s-node01 k8s-node02 k8s-node03; do
scp ${K8S_DIR}/kube-proxy.kubeconfig $NODE:/etc/kubernetes/kube-proxy.kubeconfig
scp /etc/kubernetes/kube-proxy.conf $NODE:/etc/kubernetes/kube-proxy.conf
scp /lib/systemd/system/kube-proxy.service $NODE:/lib/systemd/system/kube-proxy.service
done
Start kube-proxy on the nodes
[root@k8s-node01 ~]# systemctl daemon-reload && systemctl enable --now kube-proxy
[root@k8s-node02 ~]# systemctl daemon-reload && systemctl enable --now kube-proxy
[root@k8s-node03 ~]# systemctl daemon-reload && systemctl enable --now kube-proxy
[root@k8s-node01 ~]# systemctl status kube-proxy
● kube-proxy.service - Kubernetes Kube Proxy
Loaded: loaded (/usr/lib/systemd/system/kube-proxy.service; enabled; vendor preset: disabled)
Active: active (running) since Fri 2022-02-25 18:09:03 CST; 15s ago
Docs: https://github.com/kubernetes/kubernetes
Main PID: 19089 (kube-proxy)
Tasks: 6
Memory: 18.0M
CGroup: /system.slice/kube-proxy.service
└─19089 /usr/local/bin/kube-proxy --config=/etc/kubernetes/kube-proxy.conf --v=2
Feb 25 18:09:03 k8s-node01.example.local kube-proxy[19089]: I0225 18:09:03.647706 19089 config.go:315] Starting service config controller
Feb 25 18:09:03 k8s-node01.example.local kube-proxy[19089]: I0225 18:09:03.647751 19089 shared_informer.go:240] Waiting for caches t...config
Feb 25 18:09:03 k8s-node01.example.local kube-proxy[19089]: I0225 18:09:03.647767 19089 config.go:224] Starting endpoint slice confi...roller
Feb 25 18:09:03 k8s-node01.example.local kube-proxy[19089]: I0225 18:09:03.647770 19089 shared_informer.go:240] Waiting for caches t...config
Feb 25 18:09:03 k8s-node01.example.local kube-proxy[19089]: I0225 18:09:03.647868 19089 reflector.go:219] Starting reflector *v1beta...go:134
Feb 25 18:09:03 k8s-node01.example.local kube-proxy[19089]: I0225 18:09:03.648339 19089 reflector.go:219] Starting reflector *v1.Ser...go:134
Feb 25 18:09:03 k8s-node01.example.local kube-proxy[19089]: I0225 18:09:03.651713 19089 service.go:275] Service default/kubernetes u... ports
Feb 25 18:09:03 k8s-node01.example.local kube-proxy[19089]: I0225 18:09:03.748659 19089 shared_informer.go:247] Caches are synced fo...config
Feb 25 18:09:03 k8s-node01.example.local kube-proxy[19089]: I0225 18:09:03.748673 19089 shared_informer.go:247] Caches are synced fo...config
Feb 25 18:09:03 k8s-node01.example.local kube-proxy[19089]: I0225 18:09:03.749014 19089 service.go:390] Adding new service port "def...43/TCP
Hint: Some lines were ellipsized, use -l to show in full.
[root@k8s-node02 ~]# systemctl status kube-proxy
[root@k8s-node03 ~]# systemctl status kube-proxy
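kube-proxy also serves a healthz endpoint (default port 10256, assuming it was not overridden in kube-proxy.conf), which can be probed on any node:
[root@k8s-node01 ~]# curl http://127.0.0.1:10256/healthz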
Check the haproxy status