Binary Deployment of a Highly Available Kubernetes Cluster
Environment Preparation
Node Planning
Using the CentOS 7u9 operating system, prepare six nodes with the following configuration:
IP | CPU | Memory | Disk | Software | Hostname |
---|---|---|---|---|---|
192.168.91.200 | 1C | 0.5G | 40GB | haproxy, keepalived | ha01 |
192.168.91.201 | 1C | 0.5G | 40GB | haproxy, keepalived | ha02 |
192.168.91.202 | 2C | 2G | 40GB | kube-apiserver, kube-controller-manager, kube-scheduler, etcd, kubelet, kube-proxy, docker-ce (containerd, runc) | master01 |
192.168.91.203 | 2C | 2G | 40GB | kube-apiserver, kube-controller-manager, kube-scheduler, etcd, kubelet, kube-proxy, docker-ce (containerd, runc) | master02 |
192.168.91.204 | 2C | 2G | 40GB | kube-apiserver, kube-controller-manager, kube-scheduler, etcd, kubelet, kube-proxy, docker-ce (containerd, runc) | master03 |
192.168.91.205 | 2C | 2G | 40GB | kubelet, kube-proxy, docker-ce (containerd, runc) | worker01 |
Software Versions
Software | Version |
---|---|
CentOS7 | kernel 6.5.7 |
kubernetes | v1.21.10 |
etcd | 3.5.2 |
calico | v3.19.4 |
coredns | v1.8.4 |
docker-ce | 20.10.13 |
containerd | 1.6.1 |
runc | 1.1.0 |
haproxy | 1.5.18 |
keepalived | v1.3.5 |
Network Allocation
Network | CIDR |
---|---|
Node network | 192.168.91.0/24 |
Service network | 10.96.0.0/16 |
Pod network | 10.244.0.0/16 |
Perform the following operations on all nodes.
# Basic configuration
cat >> /etc/hosts << EOF
192.168.91.200 ha01
192.168.91.201 ha02
192.168.91.202 master01
192.168.91.203 master02
192.168.91.204 master03
192.168.91.205 worker01
EOF
yum -y install ntpdate
echo "0 */1 * * * ntpdate time1.aliyun.com" >> /var/spool/cron/root
systemctl disable firewalld && systemctl stop firewalld
sed -ri 's/SELINUX=enforcing/SELINUX=disabled/' /etc/selinux/config
setenforce 0
sed -i 's&/dev/mapper/centos-swap&#/dev/mapper/centos-swap&' /etc/fstab
swapoff -a
echo "vm.swappiness=0" >> /etc/sysctl.conf
sysctl -p
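A quick check that swap is now disabled (the Swap line should report 0 total and 0 used):
free -m | grep -i swap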
# Host system tuning
# Temporary change
ulimit -SHn 65535
# Permanent change
cat >> /etc/security/limits.conf << EOF
* soft nofile 655360
* hard nofile 655360
* soft nproc 655350
* hard nproc 655350
* soft memlock unlimited
* hard memlock unlimited
EOF
# Linux kernel upgrade
yum -y install perl
rpm --import https://www.elrepo.org/RPM-GPG-KEY-elrepo.org
yum -y install https://www.elrepo.org/elrepo-release-7.el7.elrepo.noarch.rpm
yum --enablerepo="elrepo-kernel" -y install kernel-ml.x86_64
grub2-set-default 0
grub2-mkconfig -o /boot/grub2/grub.cfg
reboot
uname -r
6.5.7-1.el7.elrepo.x86_64
# Kernel tuning
cat > /etc/sysctl.d/k8s.conf << EOF
net.ipv4.ip_forward = 1
net.bridge.bridge-nf-call-iptables = 1
net.bridge.bridge-nf-call-ip6tables = 1
fs.may_detach_mounts = 1
vm.overcommit_memory=1
vm.panic_on_oom=0
fs.inotify.max_user_watches=89100
fs.file-max=52706963
fs.nr_open=52706963
net.netfilter.nf_conntrack_max=2310720
net.ipv4.tcp_keepalive_time = 600
net.ipv4.tcp_keepalive_probes = 3
net.ipv4.tcp_keepalive_intvl = 15
net.ipv4.tcp_max_tw_buckets = 36000
net.ipv4.tcp_tw_reuse = 1
net.ipv4.tcp_max_orphans = 327680
net.ipv4.tcp_orphan_retries = 3
net.ipv4.tcp_syncookies = 1
net.ipv4.tcp_max_syn_backlog = 16384
net.ipv4.tcp_timestamps = 0
net.core.somaxconn = 16384
EOF
# Load br_netfilter first so the net.bridge.* settings can be applied
modprobe br_netfilter
sysctl --system
Perform the following operations on the cluster (master and worker) nodes; the load-balancer nodes do not need them.
yum -y install ipvsadm ipset sysstat conntrack libseccomp
cat > /etc/modules-load.d/ipvs.conf << EOF
ip_vs
ip_vs_lc
ip_vs_wlc
ip_vs_rr
ip_vs_wrr
ip_vs_lblc
ip_vs_lblcr
ip_vs_dh
ip_vs_sh
ip_vs_fo
ip_vs_nq
ip_vs_sed
ip_vs_ftp
nf_conntrack
ip_tables
ip_set
xt_set
ipt_set
ipt_rpfilter
ipt_REJECT
ipip
EOF
# Enable at boot
systemctl enable --now systemd-modules-load.service
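Once the service has run, a quick check that the ipvs and conntrack modules are actually loaded:
lsmod | grep -e ip_vs -e nf_conntrack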
# Install Docker
wget -O /etc/yum.repos.d/docker-ce.repo https://mirrors.aliyun.com/docker-ce/linux/centos/docker-ce.repo
yum -y install --setopt=obsoletes=0 docker-ce-20.10.13-3.el7
mkdir /etc/docker
cat << EOF > /etc/docker/daemon.json
{
"registry-mirrors": ["https://zwyx2n3v.mirror.aliyuncs.com"],
"exec-opts": ["native.cgroupdriver=systemd"]
}
EOF
systemctl enable docker && systemctl start docker
# Configure passwordless SSH login
# master01
ssh-keygen
# Follow the prompts, answering yes and entering the password as required
for i in 2 3 4 5; do ssh-copy-id root@192.168.91.20$i; done
ssh root@192.168.91.203
exit
To use containerd as the container runtime instead, replace the Docker installation above with the following steps to install containerd and runc.
# Load containerd-related kernel modules
# Load the modules temporarily
modprobe overlay
modprobe br_netfilter
# Load the modules persistently
cat > /etc/modules-load.d/containerd.conf << EOF
overlay
br_netfilter
EOF
systemctl enable --now systemd-modules-load.service
# Check that the containerd-related modules are loaded:
lsmod | egrep 'br_netfilter|overlay'
# Download and install containerd
wget https://github.com/containerd/containerd/releases/download/v1.6.1/cri-containerd-cni-1.6.1-linux-amd64.tar.gz
# The archive contains etc, opt, and usr directories; extracting to / places the files into the matching system directories, which saves a copy step
tar -xf cri-containerd-cni-1.6.1-linux-amd64.tar.gz -C /
# Generate the configuration file
mkdir /etc/containerd
containerd config default > /etc/containerd/config.toml
Next, edit the /etc/containerd/config.toml file and change the relevant sections as shown below:
[plugins."io.containerd.grpc.v1.cri".registry]
config_path = ""
[plugins."io.containerd.grpc.v1.cri".registry.auths]
[plugins."io.containerd.grpc.v1.cri".registry.configs]
[plugins."io.containerd.grpc.v1.cri".registry.configs."harbor.mytest.com".tls]
insecure_skip_verify = true # whether to skip TLS verification
# harbor username and password
[plugins."io.containerd.grpc.v1.cri".registry.configs."harbor.mytest.com".auth]
username = "admin"
password = "Harbor12345"
[plugins."io.containerd.grpc.v1.cri".registry.headers]
# Registry mirror configuration
[plugins."io.containerd.grpc.v1.cri".registry.mirrors]
[plugins."io.containerd.grpc.v1.cri".registry.mirrors."docker.io"]
endpoint = ["https://docker.mirrors.ustc.edu.cn", "http://hub-mirror.c.163.com"]
[plugins."io.containerd.grpc.v1.cri".registry.mirrors."gcr.io"]
endpoint = ["https://gcr.mirrors.ustc.edu.cn"]
[plugins."io.containerd.grpc.v1.cri".registry.mirrors."k8s.gcr.io"]
endpoint = ["https://gcr.mirrors.ustc.edu.cn/google-containers/"]
[plugins."io.containerd.grpc.v1.cri".registry.mirrors."quay.io"]
endpoint = ["https://quay.mirrors.ustc.edu.cn"]
[plugins."io.containerd.grpc.v1.cri".registry.mirrors."harbor.mytest.com"]
endpoint = ["http://harbor.mytest.com"]
# Download and install runc separately; the runc bundled in the archive above has too many system dependencies, so a standalone install is recommended
wget https://github.com/opencontainers/runc/releases/download/v1.1.0/runc.amd64
chmod +x runc.amd64
# Replace the runc shipped in the archive
mv runc.amd64 /usr/local/sbin/runc
runc -v
runc version 1.1.0
commit: v1.1.0-0-g067aaf85
spec: 1.0.2-dev
go: go1.17.6
libseccomp: 2.5.3
systemctl enable containerd && systemctl start containerd
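As a quick smoke test, the cri-containerd-cni archive also ships crictl; pointing it at the containerd socket and pulling a small image verifies the runtime end to end (busybox here is just an arbitrary test image):
cat > /etc/crictl.yaml << EOF
runtime-endpoint: unix:///run/containerd/containerd.sock
image-endpoint: unix:///run/containerd/containerd.sock
EOF
crictl pull docker.io/library/busybox:latest
crictl images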
Cluster Deployment
Deploying HAProxy and Keepalived
HAProxy
# ha01 and ha02
yum -y install haproxy
# Modify the haproxy configuration
mv /etc/haproxy/haproxy.cfg /etc/haproxy/haproxy.cfg.back
cat > /etc/haproxy/haproxy.cfg << "EOF"
global
maxconn 2000
ulimit-n 16384
log 127.0.0.1 local0 err
stats timeout 30s
defaults
log global
mode http
option httplog
timeout connect 5000
timeout client 50000
timeout server 50000
timeout http-request 15s
timeout http-keep-alive 15s
frontend monitor-in
bind *:33305
mode http
option httplog
monitor-uri /monitor
frontend k8s-master
bind 0.0.0.0:6443
bind 127.0.0.1:6443
mode tcp
option tcplog
tcp-request inspect-delay 5s
default_backend k8s-master
backend k8s-master
mode tcp
option tcplog
option tcp-check
balance roundrobin
default-server inter 10s downinter 5s rise 2 fall 2 slowstart 60s maxconn 250 maxqueue 256 weight 100
server master01 192.168.91.202:6443 check
server master02 192.168.91.203:6443 check
server master03 192.168.91.204:6443 check
EOF
systemctl enable haproxy;systemctl start haproxy
curl ha01:33305/monitor
curl ha02:33305/monitor
<html><body><h1>200 OK</h1>
Service ready.
</body></html>
Keepalived
# ha01 and ha02
yum -y install keepalived
# Modify the keepalived configuration
mv /etc/keepalived/keepalived.conf /etc/keepalived/keepalived.conf.back
cat > /etc/keepalived/keepalived.conf << "EOF"
! Configuration File for keepalived
global_defs {
router_id LVS_DEVEL
script_user root
enable_script_security
}
vrrp_script chk_apiserver {
script "/etc/keepalived/check_apiserver.sh" # 此脚本需要多独定义,并要调用
interval 5
weight -5
fall 2
rise 1
}
vrrp_instance VI_1 {
state MASTER
interface ens33 # change to the NIC actually in use
mcast_src_ip 192.168.91.200 # the IP address of this host
virtual_router_id 51
priority 101
advert_int 2
authentication {
auth_type PASS
auth_pass abc123
}
virtual_ipaddress {
192.168.91.100 # the VIP address
}
track_script {
chk_apiserver # run the apiserver check script defined above
}
}
EOF
# Health-check script
cat > /etc/keepalived/check_apiserver.sh << "EOF"
#!/bin/bash
err=0
for k in $(seq 1 3)
do
check_code=$(pgrep haproxy)
if [[ $check_code == "" ]]; then
err=$(expr $err + 1)
sleep 1
continue
else
err=0
break
fi
done
if [[ $err != "0" ]]; then
echo "systemctl stop keepalived"
/usr/bin/systemctl stop keepalived
exit 1
else
exit 0
fi
EOF
chmod +x /etc/keepalived/check_apiserver.sh
# ha02: apply host-specific changes to the configuration
sed -i -e 's/192.168.91.200/192.168.91.201/' -e 's/priority 101/priority 99/' /etc/keepalived/keepalived.conf
# ha01 and ha02
systemctl enable keepalived;systemctl start keepalived
# Verify availability of the HA pair
# ha01
ip a s ens33
2: ens33: <BROADCAST,MULTICAST,UP,LOWER_UP> mtu 1500 qdisc pfifo_fast state UP group default qlen 1000
link/ether 00:0c:29:de:aa:82 brd ff:ff:ff:ff:ff:ff
inet 192.168.91.200/24 brd 192.168.91.255 scope global noprefixroute ens33
valid_lft forever preferred_lft forever
inet 192.168.91.100/32 scope global ens33
valid_lft forever preferred_lft forever
inet6 fe80::8ef0:ab61:8b17:dc27/64 scope link tentative noprefixroute dadfailed
valid_lft forever preferred_lft forever
inet6 fe80::f55:5219:744d:32e4/64 scope link noprefixroute
valid_lft forever preferred_lft forever
ss -anput | grep ":6443"
tcp LISTEN 0 2000 127.0.0.1:6443 *:* users:(("haproxy",pid=1098,fd=6))
tcp LISTEN 0 2000 *:6443 *:* users:(("haproxy",pid=1098,fd=5))
# ha02
ss -anput | grep ":6443"
tcp LISTEN 0 2000 *:6443 *:* users:(("haproxy",pid=1107,fd=5))
tcp LISTEN 0 2000 127.0.0.1:6443 *:* users:(("haproxy",pid=1107,fd=6))
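To verify failover, a simple sketch: stop haproxy on ha01 so the keepalived check script fails over, confirm the VIP 192.168.91.100 moves to ha02, then restore ha01 (which reclaims the VIP thanks to its higher priority):
# ha01
systemctl stop haproxy
# ha02: the VIP should now be attached here
ip a s ens33 | grep 192.168.91.100
# ha01: restore; the check script stopped keepalived, so start both services
systemctl start haproxy
systemctl start keepalived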
Deploying the etcd Cluster
# master01
# Create a working directory
mkdir -p /data/k8s-work
cd /data/k8s-work
# Unless stated otherwise, the commands below run on master01 in /data/k8s-work
Obtaining the cfssl Tools
# cfssl is a PKI/TLS toolkit written in Go and open-sourced by CloudFlare. The main programs are cfssl, the CFSSL command-line tool, and cfssljson, which takes the JSON output of cfssl and writes the certificates, keys, CSRs, and bundles to files
wget https://pkg.cfssl.org/R1.2/cfssl_linux-amd64
wget https://pkg.cfssl.org/R1.2/cfssljson_linux-amd64
wget https://pkg.cfssl.org/R1.2/cfssl-certinfo_linux-amd64
chmod +x cfssl*
mv cfssl_linux-amd64 /usr/local/bin/cfssl
mv cfssljson_linux-amd64 /usr/local/bin/cfssljson
mv cfssl-certinfo_linux-amd64 /usr/local/bin/cfssl-certinfo
cfssl version
Version: 1.2.0
Revision: dev
Runtime: go1.6
Creating the CA Certificate
# CA certificate signing request file
cat > ca-csr.json << "EOF"
{
"CN": "kubernetes",
"key": {
"algo": "rsa",
"size": 2048
},
"names": [
{
"C": "CN",
"ST": "Beijing",
"L": "Beijing",
"O": "kubedemo",
"OU": "CN"
}
],
"ca": {
"expiry": "87600h"
}
}
EOF
# Create the CA certificate
cfssl gencert -initca ca-csr.json | cfssljson -bare ca
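The generated ca.pem can be inspected with the cfssl-certinfo tool installed earlier to confirm the CN and the expiry date:
cfssl-certinfo -cert ca.pem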
# Configure the CA signing policy. "server auth" means clients can use this CA to verify certificates presented by servers; "client auth" means servers can use this CA to verify certificates presented by clients
cat > ca-config.json << "EOF"
{
"signing": {
"default": {
"expiry": "87600h"
},
"profiles": {
"kubernetes": {
"usages": [
"signing",
"key encipherment",
"server auth",
"client auth"
],
"expiry": "87600h"
}
}
}
}
EOF
Creating the etcd Certificate
# etcd certificate signing request file
cat > etcd-csr.json << "EOF"
{
"CN": "etcd",
"hosts": [
"127.0.0.1",
"192.168.91.202",
"192.168.91.203",
"192.168.91.204"
],
"key": {
"algo": "rsa",
"size": 2048
},
"names": [{
"C": "CN",
"ST": "Beijing",
"L": "Beijing",
"O": "kubedemo",
"OU": "CN"
}]
}
EOF
# Generate the etcd certificate
cfssl gencert -ca=ca.pem -ca-key=ca-key.pem -config=ca-config.json -profile=kubernetes etcd-csr.json | cfssljson -bare etcd
Deploying the etcd cluster
# Download the etcd package
wget https://github.com/etcd-io/etcd/releases/download/v3.5.2/etcd-v3.5.2-linux-amd64.tar.gz
# Install etcd
tar xf etcd-v3.5.2-linux-amd64.tar.gz
cp -p etcd-v3.5.2-linux-amd64/etcd* /usr/local/bin/
# Distribute the etcd binaries
scp etcd-v3.5.2-linux-amd64/etcd* 192.168.91.203:/usr/local/bin/
scp etcd-v3.5.2-linux-amd64/etcd* 192.168.91.204:/usr/local/bin/
# Create the etcd configuration file
mkdir /etc/etcd
cat > /etc/etcd/etcd.conf << "EOF"
#[Member]
# Node name, unique within the cluster
ETCD_NAME="etcd1"
# Data directory
ETCD_DATA_DIR="/var/lib/etcd/default.etcd"
# Peer (cluster communication) listen URL
ETCD_LISTEN_PEER_URLS="https://192.168.91.202:2380"
# Client listen URLs
ETCD_LISTEN_CLIENT_URLS="https://192.168.91.202:2379,http://127.0.0.1:2379"
#[Clustering]
# Advertised peer URL
ETCD_INITIAL_ADVERTISE_PEER_URLS="https://192.168.91.202:2380"
# Advertised client URL
ETCD_ADVERTISE_CLIENT_URLS="https://192.168.91.202:2379"
# Cluster member list
ETCD_INITIAL_CLUSTER="etcd1=https://192.168.91.202:2380,etcd2=https://192.168.91.203:2380,etcd3=https://192.168.91.204:2380"
# Cluster token
ETCD_INITIAL_CLUSTER_TOKEN="etcd-cluster"
# State when joining the cluster: "new" for a new cluster, "existing" to join an existing one
ETCD_INITIAL_CLUSTER_STATE="new"
EOF
# Create the service unit file
mkdir -p /etc/etcd/ssl
mkdir -p /var/lib/etcd/default.etcd
cp ca*.pem /etc/etcd/ssl
cp etcd*.pem /etc/etcd/ssl
cat > /etc/systemd/system/etcd.service << "EOF"
[Unit]
Description=Etcd Server
After=network.target
After=network-online.target
Wants=network-online.target
[Service]
Type=notify
EnvironmentFile=-/etc/etcd/etcd.conf
WorkingDirectory=/var/lib/etcd/
ExecStart=/usr/local/bin/etcd \
--cert-file=/etc/etcd/ssl/etcd.pem \
--key-file=/etc/etcd/ssl/etcd-key.pem \
--trusted-ca-file=/etc/etcd/ssl/ca.pem \
--peer-cert-file=/etc/etcd/ssl/etcd.pem \
--peer-key-file=/etc/etcd/ssl/etcd-key.pem \
--peer-trusted-ca-file=/etc/etcd/ssl/ca.pem \
--peer-client-cert-auth \
--client-cert-auth
Restart=on-failure
RestartSec=5
LimitNOFILE=65536
[Install]
WantedBy=multi-user.target
EOF
# Sync the etcd configuration to the other master nodes
# Adjust the configuration and copy it to master02
sed -e '/^ETCD_INITIAL_CLUSTER/!s/192.168.91.202/192.168.91.203/g' -e 's/ETCD_NAME="etcd1"/ETCD_NAME="etcd2"/' /etc/etcd/etcd.conf > etcd.conf
ssh 192.168.91.203 "mkdir -p /etc/etcd/ssl;mkdir -p /var/lib/etcd/default.etcd"
scp etcd.conf 192.168.91.203:/etc/etcd/
# Adjust the configuration and copy it to master03
sed -e '/^ETCD_INITIAL_CLUSTER/!s/192.168.91.202/192.168.91.204/g' -e 's/ETCD_NAME="etcd1"/ETCD_NAME="etcd3"/' /etc/etcd/etcd.conf > etcd.conf
ssh 192.168.91.204 "mkdir -p /etc/etcd/ssl;mkdir -p /var/lib/etcd/default.etcd"
scp etcd.conf 192.168.91.204:/etc/etcd/
# Sync the certificate files and the service unit file to the other master nodes
for i in 192.168.91.203 192.168.91.204; do scp /etc/etcd/ssl/* $i:/etc/etcd/ssl;scp /etc/systemd/system/etcd.service $i:/etc/systemd/system/; done
# Run on all master nodes
systemctl daemon-reload
systemctl enable --now etcd.service
systemctl status etcd
# Verify cluster health
ETCDCTL_API=3 /usr/local/bin/etcdctl --write-out=table --cacert=/etc/etcd/ssl/ca.pem --cert=/etc/etcd/ssl/etcd.pem --key=/etc/etcd/ssl/etcd-key.pem --endpoints=https://192.168.91.202:2379,https://192.168.91.203:2379,https://192.168.91.204:2379 endpoint health
+-----------------------------+--------+-------------+-------+
| ENDPOINT | HEALTH | TOOK | ERROR |
+-----------------------------+--------+-------------+-------+
| https://192.168.91.202:2379 | true | 6.110995ms | |
| https://192.168.91.203:2379 | true | 9.93496ms | |
| https://192.168.91.204:2379 | true | 10.372877ms | |
+-----------------------------+--------+-------------+-------+
# Benchmark etcd performance
ETCDCTL_API=3 /usr/local/bin/etcdctl --write-out=table --cacert=/etc/etcd/ssl/ca.pem --cert=/etc/etcd/ssl/etcd.pem --key=/etc/etcd/ssl/etcd-key.pem --endpoints=https://192.168.91.202:2379,https://192.168.91.203:2379,https://192.168.91.204:2379 check perf
59 / 60 Boooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooom ! 98.33%
PASS: Throughput is 150 writes/s
PASS: Slowest request took 0.016519s
PASS: Stddev is 0.000751s
PASS
ETCDCTL_API=3 /usr/local/bin/etcdctl --write-out=table --cacert=/etc/etcd/ssl/ca.pem --cert=/etc/etcd/ssl/etcd.pem --key=/etc/etcd/ssl/etcd-key.pem --endpoints=https://192.168.91.202:2379,https://192.168.91.203:2379,https://192.168.91.204:2379 member list
+------------------+---------+-------+-----------------------------+-----------------------------+------------+
| ID | STATUS | NAME | PEER ADDRS | CLIENT ADDRS | IS LEARNER |
+------------------+---------+-------+-----------------------------+-----------------------------+------------+
| 6c634161d08d6df | started | etcd2 | https://192.168.91.203:2380 | https://192.168.91.203:2379 | false |
| 6352d967c0810a3a | started | etcd3 | https://192.168.91.204:2380 | https://192.168.91.204:2379 | false |
| adaa8bf3899a1d2d | started | etcd1 | https://192.168.91.202:2380 | https://192.168.91.202:2379 | false |
+------------------+---------+-------+-----------------------------+-----------------------------+------------+
ETCDCTL_API=3 /usr/local/bin/etcdctl --write-out=table --cacert=/etc/etcd/ssl/ca.pem --cert=/etc/etcd/ssl/etcd.pem --key=/etc/etcd/ssl/etcd-key.pem --endpoints=https://192.168.91.202:2379,https://192.168.91.203:2379,https://192.168.91.204:2379 endpoint status
+-----------------------------+------------------+---------+---------+-----------+------------+-----------+------------+--------------------+--------+
| ENDPOINT | ID | VERSION | DB SIZE | IS LEADER | IS LEARNER | RAFT TERM | RAFT INDEX | RAFT APPLIED INDEX | ERRORS |
+-----------------------------+------------------+---------+---------+-----------+------------+-----------+------------+--------------------+--------+
| https://192.168.91.202:2379 | adaa8bf3899a1d2d | 3.5.2 | 22 MB | true | false | 3 | 9010 | 9010 | |
| https://192.168.91.203:2379 | 6c634161d08d6df | 3.5.2 | 22 MB | false | false | 3 | 9010 | 9010 | |
| https://192.168.91.204:2379 | 6352d967c0810a3a | 3.5.2 | 22 MB | false | false | 3 | 9010 | 9010 | |
+-----------------------------+------------------+---------+---------+-----------+------------+-----------+------------+--------------------+--------+
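As a final functional check, a simple put/get round trip through any endpoint (foo/bar are arbitrary test values):
ETCDCTL_API=3 /usr/local/bin/etcdctl --cacert=/etc/etcd/ssl/ca.pem --cert=/etc/etcd/ssl/etcd.pem --key=/etc/etcd/ssl/etcd-key.pem --endpoints=https://192.168.91.202:2379 put foo bar
ETCDCTL_API=3 /usr/local/bin/etcdctl --cacert=/etc/etcd/ssl/ca.pem --cert=/etc/etcd/ssl/etcd.pem --key=/etc/etcd/ssl/etcd-key.pem --endpoints=https://192.168.91.203:2379 get foo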
Kubernetes Cluster Deployment
# master01
cd /data/k8s-work
# Unless stated otherwise, the commands below run on master01 in /data/k8s-work
# Download and install Kubernetes
wget https://dl.k8s.io/v1.21.10/kubernetes-server-linux-amd64.tar.gz
tar xf kubernetes-server-linux-amd64.tar.gz
cd kubernetes/server/bin/
cp kube-apiserver kube-controller-manager kube-scheduler kubectl kubelet kube-proxy /usr/local/bin/
# Distribute the Kubernetes binaries
scp kube-apiserver kube-controller-manager kube-scheduler kubectl kubelet kube-proxy 192.168.91.203:/usr/local/bin/
scp kube-apiserver kube-controller-manager kube-scheduler kubectl kubelet kube-proxy 192.168.91.204:/usr/local/bin/
scp kubelet kube-proxy 192.168.91.205:/usr/local/bin
cd -
mkdir -p /etc/kubernetes/ssl
mkdir -p /var/log/kubernetes
Deploying kube-apiserver
Create the apiserver certificate signing request file
If the hosts field is non-empty, it must list the IPs (including the VIP) or domain names authorized to use the certificate. Since this certificate is used across the cluster, include the IPs of all nodes, and reserve a few extra IPs to simplify future scaling. Also include the first IP of the service network (normally the first IP of the service-cluster-ip-range passed to kube-apiserver, e.g. 10.96.0.1).
cat > kube-apiserver-csr.json << "EOF"
{
"CN": "kubernetes",
"hosts": [
"127.0.0.1",
"192.168.91.202",
"192.168.91.203",
"192.168.91.204",
"192.168.91.205",
"192.168.91.206",
"192.168.91.207",
"192.168.91.208",
"192.168.91.209",
"192.168.91.210",
"192.168.91.100",
"10.96.0.1",
"kubernetes",
"kubernetes.default",
"kubernetes.default.svc",
"kubernetes.default.svc.cluster",
"kubernetes.default.svc.cluster.local"
],
"key": {
"algo": "rsa",
"size": 2048
},
"names": [
{
"C": "CN",
"ST": "Beijing",
"L": "Beijing",
"O": "kubedemo",
"OU": "CN"
}
]
}
EOF
Generate the apiserver certificate and token file
Create the token required for TLS bootstrapping. TLS bootstrapping: once the apiserver enables TLS authentication, the kubelet and kube-proxy on each node must present valid CA-signed certificates to communicate with kube-apiserver. With many nodes, issuing these client certificates by hand is a lot of work and complicates scaling the cluster. To simplify this, Kubernetes introduced TLS bootstrapping to issue client certificates automatically: the kubelet applies for a certificate from the apiserver as a low-privileged user, and the apiserver signs the kubelet certificate dynamically. This approach is strongly recommended on nodes; it is currently used for kubelet, while kube-proxy still uses a certificate we issue centrally.
cfssl gencert -ca=ca.pem -ca-key=ca-key.pem -config=ca-config.json -profile=kubernetes kube-apiserver-csr.json | cfssljson -bare kube-apiserver
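To confirm that every IP from the hosts list above made it into the certificate, check the Subject Alternative Name field:
openssl x509 -in kube-apiserver.pem -noout -text | grep -A1 'Subject Alternative Name'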
cat > token.csv << EOF
$(head -c 16 /dev/urandom | od -An -t x | tr -d ' '),kubelet-bootstrap,10001,"system:kubelet-bootstrap"
EOF
Create the apiserver service configuration file
cat > /etc/kubernetes/kube-apiserver.conf << "EOF"
KUBE_APISERVER_OPTS="--enable-admission-plugins=NamespaceLifecycle,NodeRestriction,LimitRanger,ServiceAccount,DefaultStorageClass,ResourceQuota \
--anonymous-auth=false \
--bind-address=192.168.91.202 \
--secure-port=6443 \
--advertise-address=192.168.91.202 \
--insecure-port=0 \
--authorization-mode=Node,RBAC \
--runtime-config=api/all=true \
--enable-bootstrap-token-auth \
--service-cluster-ip-range=10.96.0.0/16 \
--token-auth-file=/etc/kubernetes/token.csv \
--service-node-port-range=30000-32767 \
--tls-cert-file=/etc/kubernetes/ssl/kube-apiserver.pem \
--tls-private-key-file=/etc/kubernetes/ssl/kube-apiserver-key.pem \
--client-ca-file=/etc/kubernetes/ssl/ca.pem \
--kubelet-client-certificate=/etc/kubernetes/ssl/kube-apiserver.pem \
--kubelet-client-key=/etc/kubernetes/ssl/kube-apiserver-key.pem \
--service-account-key-file=/etc/kubernetes/ssl/ca-key.pem \
--service-account-signing-key-file=/etc/kubernetes/ssl/ca-key.pem \
--service-account-issuer=api \
--etcd-cafile=/etc/etcd/ssl/ca.pem \
--etcd-certfile=/etc/etcd/ssl/etcd.pem \
--etcd-keyfile=/etc/etcd/ssl/etcd-key.pem \
--etcd-servers=https://192.168.91.202:2379,https://192.168.91.203:2379,https://192.168.91.204:2379 \
--enable-swagger-ui=true \
--allow-privileged=true \
--apiserver-count=3 \
--audit-log-maxage=30 \
--audit-log-maxbackup=3 \
--audit-log-maxsize=100 \
--audit-log-path=/var/log/kube-apiserver-audit.log \
--event-ttl=1h \
--alsologtostderr=true \
--logtostderr=false \
--log-dir=/var/log/kubernetes \
--v=4"
EOF
Create the apiserver systemd service file
cat > /etc/systemd/system/kube-apiserver.service << "EOF"
[Unit]
Description=Kubernetes API Server
Documentation=https://github.com/kubernetes/kubernetes
After=etcd.service
Wants=etcd.service
[Service]
EnvironmentFile=-/etc/kubernetes/kube-apiserver.conf
ExecStart=/usr/local/bin/kube-apiserver $KUBE_APISERVER_OPTS
Restart=on-failure
RestartSec=5
Type=notify
LimitNOFILE=65536
[Install]
WantedBy=multi-user.target
EOF
Sync files to the cluster master nodes
cp ca*.pem /etc/kubernetes/ssl/
cp kube-apiserver*.pem /etc/kubernetes/ssl/
cp token.csv /etc/kubernetes/
# Adjust kube-apiserver.conf and copy it to master02
sed '1,8 s/192.168.91.202/192.168.91.203/g' /etc/kubernetes/kube-apiserver.conf > kube-apiserver.conf
ssh 192.168.91.203 "mkdir -p /etc/kubernetes/ssl;mkdir -p /var/log/kubernetes"
scp kube-apiserver.conf 192.168.91.203:/etc/kubernetes/kube-apiserver.conf
# Adjust kube-apiserver.conf and copy it to master03
sed '1,8 s/192.168.91.202/192.168.91.204/g' /etc/kubernetes/kube-apiserver.conf > kube-apiserver.conf
ssh 192.168.91.204 "mkdir -p /etc/kubernetes/ssl;mkdir -p /var/log/kubernetes"
scp kube-apiserver.conf 192.168.91.204:/etc/kubernetes/kube-apiserver.conf
# Sync the remaining files
for i in 192.168.91.203 192.168.91.204; do scp ca*.pem $i:/etc/kubernetes/ssl/;scp kube-apiserver*.pem $i:/etc/kubernetes/ssl/;scp token.csv $i:/etc/kubernetes/;scp /etc/systemd/system/kube-apiserver.service $i:/etc/systemd/system/kube-apiserver.service; done
Start the apiserver service
# Run on all master nodes
systemctl daemon-reload
systemctl enable --now kube-apiserver
systemctl status kube-apiserver
curl --insecure https://192.168.91.202:6443/
curl --insecure https://192.168.91.203:6443/
curl --insecure https://192.168.91.204:6443/
curl --insecure https://192.168.91.100:6443/
{
"kind": "Status",
"apiVersion": "v1",
"metadata": {
},
"status": "Failure",
"message": "Unauthorized",
"reason": "Unauthorized",
"code": 401
}
Deploying kubectl
Create the kubectl certificate signing request file
cat > admin-csr.json << "EOF"
{
"CN": "admin",
"hosts": [],
"key": {
"algo": "rsa",
"size": 2048
},
"names": [
{
"C": "CN",
"ST": "Beijing",
"L": "Beijing",
"O": "system:masters",
"OU": "system"
}
]
}
EOF
kube-apiserver later uses RBAC to authorize client requests (from kubelet, kube-proxy, Pods, etc.). kube-apiserver predefines some RoleBindings for RBAC; for example, cluster-admin binds the Group system:masters to the Role cluster-admin, which grants permission to call every kube-apiserver API. O sets this certificate's Group to system:masters; when kubectl uses the certificate to access kube-apiserver, authentication succeeds because the certificate is CA-signed, and because the certificate's group is the pre-authorized system:masters, it is granted access to all APIs.
Note: this admin certificate is later used to generate the administrator kubeconfig file. RBAC is now the recommended way to control roles and permissions in Kubernetes; Kubernetes treats the certificate's CN field as the User and the O field as the Group. "O": "system:masters" must be exactly system:masters, otherwise the later kubectl create clusterrolebinding will fail.
# Generate the certificate files
cfssl gencert -ca=ca.pem -ca-key=ca-key.pem -config=ca-config.json -profile=kubernetes admin-csr.json | cfssljson -bare admin
# Copy the files to the target directory
cp admin*.pem /etc/kubernetes/ssl/
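Since the CN and O fields drive RBAC as described above, it is worth confirming them on the generated certificate (the subject line should show O=system:masters and CN=admin):
openssl x509 -in admin.pem -noout -subject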
# Generate the kubeconfig file. kube.config is the configuration file for kubectl; it contains everything needed to access the apiserver: the apiserver address, the CA certificate, and the client certificate
kubectl config set-cluster kubernetes --certificate-authority=ca.pem --embed-certs=true --server=https://192.168.91.100:6443 --kubeconfig=kube.config
kubectl config set-credentials admin --client-certificate=admin.pem --client-key=admin-key.pem --embed-certs=true --kubeconfig=kube.config
kubectl config set-context kubernetes --cluster=kubernetes --user=admin --kubeconfig=kube.config
kubectl config use-context kubernetes --kubeconfig=kube.config
# Install the kubectl config file and create the role binding
mkdir ~/.kube
cp kube.config ~/.kube/config
kubectl create clusterrolebinding kube-apiserver:kubelet-apis --clusterrole=system:kubelet-api-admin --user kubernetes --kubeconfig=/root/.kube/config
# Check cluster status
export KUBECONFIG=$HOME/.kube/config
kubectl cluster-info
Kubernetes control plane is running at https://192.168.91.100:6443
To further debug and diagnose cluster problems, use 'kubectl cluster-info dump'.
kubectl get componentstatuses
Warning: v1 ComponentStatus is deprecated in v1.19+
NAME STATUS MESSAGE ERROR
scheduler Unhealthy Get "http://127.0.0.1:10251/healthz": dial tcp 127.0.0.1:10251: connect: connection refused
controller-manager Unhealthy Get "http://127.0.0.1:10252/healthz": dial tcp 127.0.0.1:10252: connect: connection refused
etcd-0 Healthy {"health":"true","reason":""}
etcd-1 Healthy {"health":"true","reason":""}
etcd-2 Healthy {"health":"true","reason":""}
kubectl get all --all-namespaces
NAMESPACE NAME TYPE CLUSTER-IP EXTERNAL-IP PORT(S) AGE
default service/kubernetes ClusterIP 10.96.0.1 <none> 443/TCP 24h
# Sync the kubectl config file to the other master nodes
for i in 192.168.91.203 192.168.91.204; do ssh $i 'mkdir /root/.kube';scp /root/.kube/config $i:/root/.kube/config; done
# Configure kubectl command completion (optional)
yum install -y bash-completion
source /usr/share/bash-completion/bash_completion
source <(kubectl completion bash)
kubectl completion bash > ~/.kube/completion.bash.inc
source '/root/.kube/completion.bash.inc'
source $HOME/.bash_profile
Deploying kube-controller-manager
Create the kube-controller-manager certificate signing request file
cat > kube-controller-manager-csr.json << "EOF"
{
"CN": "system:kube-controller-manager",
"key": {
"algo": "rsa",
"size": 2048
},
"hosts": [
"127.0.0.1",
"192.168.91.202",
"192.168.91.203",
"192.168.91.204"
],
"names": [
{
"C": "CN",
"ST": "Beijing",
"L": "Beijing",
"O": "system:kube-controller-manager",
"OU": "system"
}
]
}
EOF
The hosts list contains the IPs of all kube-controller-manager nodes. CN is system:kube-controller-manager and O is system:kube-controller-manager; the Kubernetes built-in ClusterRoleBinding system:kube-controller-manager grants kube-controller-manager the permissions its work requires.
# Create the kube-controller-manager certificate files
cfssl gencert -ca=ca.pem -ca-key=ca-key.pem -config=ca-config.json -profile=kubernetes kube-controller-manager-csr.json | cfssljson -bare kube-controller-manager
# Create kube-controller-manager.kubeconfig for kube-controller-manager
kubectl config set-cluster kubernetes --certificate-authority=ca.pem --embed-certs=true --server=https://192.168.91.100:6443 --kubeconfig=kube-controller-manager.kubeconfig
kubectl config set-credentials system:kube-controller-manager --client-certificate=kube-controller-manager.pem --client-key=kube-controller-manager-key.pem --embed-certs=true --kubeconfig=kube-controller-manager.kubeconfig
kubectl config set-context system:kube-controller-manager --cluster=kubernetes --user=system:kube-controller-manager --kubeconfig=kube-controller-manager.kubeconfig
kubectl config use-context system:kube-controller-manager --kubeconfig=kube-controller-manager.kubeconfig
# Create the kube-controller-manager configuration file
cat > kube-controller-manager.conf << "EOF"
KUBE_CONTROLLER_MANAGER_OPTS="--port=10252 \
--secure-port=10257 \
--bind-address=127.0.0.1 \
--kubeconfig=/etc/kubernetes/kube-controller-manager.kubeconfig \
--service-cluster-ip-range=10.96.0.0/16 \
--cluster-name=kubernetes \
--cluster-signing-cert-file=/etc/kubernetes/ssl/ca.pem \
--cluster-signing-key-file=/etc/kubernetes/ssl/ca-key.pem \
--allocate-node-cidrs=true \
--cluster-cidr=10.244.0.0/16 \
--experimental-cluster-signing-duration=87600h \
--root-ca-file=/etc/kubernetes/ssl/ca.pem \
--service-account-private-key-file=/etc/kubernetes/ssl/ca-key.pem \
--leader-elect=true \
--feature-gates=RotateKubeletServerCertificate=true \
--controllers=*,bootstrapsigner,tokencleaner \
--horizontal-pod-autoscaler-use-rest-clients=true \
--horizontal-pod-autoscaler-sync-period=10s \
--tls-cert-file=/etc/kubernetes/ssl/kube-controller-manager.pem \
--tls-private-key-file=/etc/kubernetes/ssl/kube-controller-manager-key.pem \
--use-service-account-credentials=true \
--alsologtostderr=true \
--logtostderr=false \
--log-dir=/var/log/kubernetes \
--v=2"
EOF
# Create the systemd service file
cat > kube-controller-manager.service << "EOF"
[Unit]
Description=Kubernetes Controller Manager
Documentation=https://github.com/kubernetes/kubernetes
[Service]
EnvironmentFile=-/etc/kubernetes/kube-controller-manager.conf
ExecStart=/usr/local/bin/kube-controller-manager $KUBE_CONTROLLER_MANAGER_OPTS
Restart=on-failure
RestartSec=5
[Install]
WantedBy=multi-user.target
EOF
# Sync files to the cluster master nodes
cp kube-controller-manager*.pem /etc/kubernetes/ssl/
cp kube-controller-manager.kubeconfig kube-controller-manager.conf /etc/kubernetes/
cp kube-controller-manager.service /usr/lib/systemd/system/
for i in 192.168.91.203 192.168.91.204; do scp kube-controller-manager*.pem $i:/etc/kubernetes/ssl/;scp kube-controller-manager.kubeconfig kube-controller-manager.conf $i:/etc/kubernetes/;scp kube-controller-manager.service $i:/usr/lib/systemd/system/; done
# Inspect the certificate
openssl x509 -in /etc/kubernetes/ssl/kube-controller-manager.pem -noout -text
# Start the service on all master nodes
systemctl daemon-reload
systemctl enable --now kube-controller-manager
systemctl status kube-controller-manager
# Check component status
kubectl get componentstatuses
Warning: v1 ComponentStatus is deprecated in v1.19+
NAME STATUS MESSAGE ERROR
scheduler Unhealthy Get "http://127.0.0.1:10251/healthz": dial tcp 127.0.0.1:10251: connect: connection refused
controller-manager Healthy ok
etcd-0 Healthy {"health":"true","reason":""}
etcd-1 Healthy {"health":"true","reason":""}
etcd-2 Healthy {"health":"true","reason":""}
Deploying kube-scheduler
# Create the kube-scheduler certificate signing request file
cat > kube-scheduler-csr.json << "EOF"
{
"CN": "system:kube-scheduler",
"hosts": [
"127.0.0.1",
"192.168.91.202",
"192.168.91.203",
"192.168.91.204"
],
"key": {
"algo": "rsa",
"size": 2048
},
"names": [
{
"C": "CN",
"ST": "Beijing",
"L": "Beijing",
"O": "system:kube-scheduler",
"OU": "system"
}
]
}
EOF
# Generate the kube-scheduler certificate
cfssl gencert -ca=ca.pem -ca-key=ca-key.pem -config=ca-config.json -profile=kubernetes kube-scheduler-csr.json | cfssljson -bare kube-scheduler
# Create the kube-scheduler kubeconfig
kubectl config set-cluster kubernetes --certificate-authority=ca.pem --embed-certs=true --server=https://192.168.91.100:6443 --kubeconfig=kube-scheduler.kubeconfig
kubectl config set-credentials system:kube-scheduler --client-certificate=kube-scheduler.pem --client-key=kube-scheduler-key.pem --embed-certs=true --kubeconfig=kube-scheduler.kubeconfig
kubectl config set-context system:kube-scheduler --cluster=kubernetes --user=system:kube-scheduler --kubeconfig=kube-scheduler.kubeconfig
kubectl config use-context system:kube-scheduler --kubeconfig=kube-scheduler.kubeconfig
# Create the service configuration file
cat > kube-scheduler.conf << "EOF"
KUBE_SCHEDULER_OPTS="--address=127.0.0.1 \
--kubeconfig=/etc/kubernetes/kube-scheduler.kubeconfig \
--leader-elect=true \
--alsologtostderr=true \
--logtostderr=false \
--log-dir=/var/log/kubernetes \
--v=2"
EOF
# Create the systemd service file
cat > kube-scheduler.service << "EOF"
[Unit]
Description=Kubernetes Scheduler
Documentation=https://github.com/kubernetes/kubernetes
[Service]
EnvironmentFile=-/etc/kubernetes/kube-scheduler.conf
ExecStart=/usr/local/bin/kube-scheduler $KUBE_SCHEDULER_OPTS
Restart=on-failure
RestartSec=5
[Install]
WantedBy=multi-user.target
EOF
# Sync files to the cluster master nodes
cp kube-scheduler*.pem /etc/kubernetes/ssl/
cp kube-scheduler.kubeconfig kube-scheduler.conf /etc/kubernetes/
cp kube-scheduler.service /usr/lib/systemd/system/
for i in 192.168.91.203 192.168.91.204; do scp kube-scheduler*.pem $i:/etc/kubernetes/ssl/;scp kube-scheduler.kubeconfig kube-scheduler.conf $i:/etc/kubernetes/;scp kube-scheduler.service $i:/usr/lib/systemd/system/; done
# Start the service on all master nodes
systemctl daemon-reload
systemctl enable --now kube-scheduler
systemctl status kube-scheduler
# Check component status
kubectl get componentstatuses
Warning: v1 ComponentStatus is deprecated in v1.19+
NAME STATUS MESSAGE ERROR
controller-manager Healthy ok
scheduler Healthy ok
etcd-2 Healthy {"health":"true","reason":""}
etcd-0 Healthy {"health":"true","reason":""}
etcd-1 Healthy {"health":"true","reason":""}
Worker Node Deployment
# worker01
# Create directories
mkdir -p /etc/kubernetes/ssl
mkdir -p /var/log/kubernetes
# Unless stated otherwise, the commands below run on master01 in /data/k8s-work
Deploying kubelet
# Create kubelet-bootstrap.kubeconfig
BOOTSTRAP_TOKEN=$(awk -F "," '{print $1}' /etc/kubernetes/token.csv)
kubectl config set-cluster kubernetes --certificate-authority=ca.pem --embed-certs=true --server=https://192.168.91.100:6443 --kubeconfig=kubelet-bootstrap.kubeconfig
kubectl config set-credentials kubelet-bootstrap --token=${BOOTSTRAP_TOKEN} --kubeconfig=kubelet-bootstrap.kubeconfig
kubectl config set-context default --cluster=kubernetes --user=kubelet-bootstrap --kubeconfig=kubelet-bootstrap.kubeconfig
kubectl config use-context default --kubeconfig=kubelet-bootstrap.kubeconfig
kubectl create clusterrolebinding cluster-system-anonymous --clusterrole=cluster-admin --user=kubelet-bootstrap
kubectl create clusterrolebinding kubelet-bootstrap --clusterrole=system:node-bootstrapper --user=kubelet-bootstrap --kubeconfig=kubelet-bootstrap.kubeconfig
kubectl describe clusterrolebinding cluster-system-anonymous
Name: cluster-system-anonymous
Labels: <none>
Annotations: <none>
Role:
Kind: ClusterRole
Name: cluster-admin
Subjects:
Kind Name Namespace
---- ---- ---------
User kubelet-bootstrap
kubectl describe clusterrolebinding kubelet-bootstrap
Name: kubelet-bootstrap
Labels: <none>
Annotations: <none>
Role:
Kind: ClusterRole
Name: system:node-bootstrapper
Subjects:
Kind Name Namespace
---- ---- ---------
User kubelet-bootstrap
# Create the kubelet configuration file
cat > kubelet.json << "EOF"
{
"kind": "KubeletConfiguration",
"apiVersion": "kubelet.config.k8s.io/v1beta1",
"authentication": {
"x509": {
"clientCAFile": "/etc/kubernetes/ssl/ca.pem"
},
"webhook": {
"enabled": true,
"cacheTTL": "2m0s"
},
"anonymous": {
"enabled": false
}
},
"authorization": {
"mode": "Webhook",
"webhook": {
"cacheAuthorizedTTL": "5m0s",
"cacheUnauthorizedTTL": "30s"
}
},
"address": "192.168.91.202",
"port": 10250,
"readOnlyPort": 10255,
"cgroupDriver": "systemd",
"hairpinMode": "promiscuous-bridge",
"serializeImagePulls": false,
"clusterDomain": "cluster.local.",
"clusterDNS": ["10.96.0.2"]
}
EOF
# Create the kubelet service file; note that it differs depending on whether docker or containerd is used
# docker
cat > kubelet.service << "EOF"
[Unit]
Description=Kubernetes Kubelet
Documentation=https://github.com/kubernetes/kubernetes
After=docker.service
Requires=docker.service
[Service]
WorkingDirectory=/var/lib/kubelet
ExecStart=/usr/local/bin/kubelet \
--bootstrap-kubeconfig=/etc/kubernetes/kubelet-bootstrap.kubeconfig \
--cert-dir=/etc/kubernetes/ssl \
--kubeconfig=/etc/kubernetes/kubelet.kubeconfig \
--config=/etc/kubernetes/kubelet.json \
--network-plugin=cni \
--rotate-certificates \
--pod-infra-container-image=registry.aliyuncs.com/google_containers/pause:3.2 \
--alsologtostderr=true \
--logtostderr=false \
--log-dir=/var/log/kubernetes \
--v=2
Restart=on-failure
RestartSec=5
[Install]
WantedBy=multi-user.target
EOF
# containerd
cat > kubelet.service << "EOF"
[Unit]
Description=Kubernetes Kubelet
Documentation=https://github.com/kubernetes/kubernetes
After=containerd.service
Requires=containerd.service
[Service]
WorkingDirectory=/var/lib/kubelet
ExecStart=/usr/local/bin/kubelet \
--bootstrap-kubeconfig=/etc/kubernetes/kubelet-bootstrap.kubeconfig \
--cert-dir=/etc/kubernetes/ssl \
--kubeconfig=/etc/kubernetes/kubelet.kubeconfig \
--config=/etc/kubernetes/kubelet.json \
--cni-bin-dir=/opt/cni/bin \
--cni-conf-dir=/etc/cni/net.d \
--container-runtime=remote \
--container-runtime-endpoint=unix:///run/containerd/containerd.sock \
--network-plugin=cni \
--rotate-certificates \
--pod-infra-container-image=registry.aliyuncs.com/google_containers/pause:3.2 \
--alsologtostderr=true \
--logtostderr=false \
--log-dir=/var/log/kubernetes \
--v=2
Restart=on-failure
RestartSec=5
[Install]
WantedBy=multi-user.target
EOF
# Sync files to the other cluster nodes
cp kubelet-bootstrap.kubeconfig kubelet.json /etc/kubernetes/
cp kubelet.service /usr/lib/systemd/system/
# The address field in kubelet.json must be changed to each host's own IP
for i in 3 4 5; do j=192.168.91.20$i; sed "s/192.168.91.202/$j/" /etc/kubernetes/kubelet.json > kubelet.json; scp kubelet-bootstrap.kubeconfig kubelet.json $j:/etc/kubernetes/; scp kubelet.service $j:/usr/lib/systemd/system/; done
scp ca.pem 192.168.91.205:/etc/kubernetes/ssl/
# On all master and worker nodes: create directories and start the service
mkdir -p /var/lib/kubelet
mkdir -p /var/log/kubernetes
systemctl daemon-reload
systemctl enable --now kubelet
systemctl status kubelet
# On any master node. When containerd is the runtime, the status here will already show Ready
kubectl get nodes
NAME STATUS ROLES AGE VERSION
master01 NotReady <none> 11m v1.21.10
master02 NotReady <none> 11m v1.21.10
master03 NotReady <none> 11m v1.21.10
worker01 NotReady <none> 11m v1.21.10
kubectl get csr
NAME AGE SIGNERNAME REQUESTOR CONDITION
csr-82scp 12m kubernetes.io/kube-apiserver-client-kubelet kubelet-bootstrap Approved,Issued
csr-dnnxp 40m kubernetes.io/kube-apiserver-client-kubelet kubelet-bootstrap Approved,Issued
csr-n8r6n 12m kubernetes.io/kube-apiserver-client-kubelet kubelet-bootstrap Approved,Issued
csr-znnzj 12m kubernetes.io/kube-apiserver-client-kubelet kubelet-bootstrap Approved,Issued
csr-ztrkx 12m kubernetes.io/kube-apiserver-client-kubelet kubelet-bootstrap Approved,Issued
Deploying kube-proxy
# Create the kube-proxy certificate signing request file
cat > kube-proxy-csr.json << "EOF"
{
"CN": "system:kube-proxy",
"key": {
"algo": "rsa",
"size": 2048
},
"names": [
{
"C": "CN",
"ST": "Beijing",
"L": "Beijing",
"O": "kubedemo",
"OU": "CN"
}
]
}
EOF
# Generate the certificate
cfssl gencert -ca=ca.pem -ca-key=ca-key.pem -config=ca-config.json -profile=kubernetes kube-proxy-csr.json | cfssljson -bare kube-proxy
# Create the kubeconfig file
kubectl config set-cluster kubernetes --certificate-authority=ca.pem --embed-certs=true --server=https://192.168.91.100:6443 --kubeconfig=kube-proxy.kubeconfig
kubectl config set-credentials kube-proxy --client-certificate=kube-proxy.pem --client-key=kube-proxy-key.pem --embed-certs=true --kubeconfig=kube-proxy.kubeconfig
kubectl config set-context default --cluster=kubernetes --user=kube-proxy --kubeconfig=kube-proxy.kubeconfig
kubectl config use-context default --kubeconfig=kube-proxy.kubeconfig
# Create the service configuration file
cat > kube-proxy.yaml << "EOF"
apiVersion: kubeproxy.config.k8s.io/v1alpha1
bindAddress: 192.168.91.202
clientConnection:
kubeconfig: /etc/kubernetes/kube-proxy.kubeconfig
clusterCIDR: 10.244.0.0/16
healthzBindAddress: 192.168.91.202:10256
kind: KubeProxyConfiguration
metricsBindAddress: 192.168.91.202:10249
mode: "ipvs"
EOF
# Create the systemd service file
cat > kube-proxy.service << "EOF"
[Unit]
Description=Kubernetes Kube-Proxy Server
Documentation=https://github.com/kubernetes/kubernetes
After=network.target
[Service]
WorkingDirectory=/var/lib/kube-proxy
ExecStart=/usr/local/bin/kube-proxy \
--config=/etc/kubernetes/kube-proxy.yaml \
--alsologtostderr=true \
--logtostderr=false \
--log-dir=/var/log/kubernetes \
--v=2
Restart=on-failure
RestartSec=5
LimitNOFILE=65536
[Install]
WantedBy=multi-user.target
EOF
# Sync files to the other cluster nodes
cp kube-proxy*.pem /etc/kubernetes/ssl/
cp kube-proxy.kubeconfig kube-proxy.yaml /etc/kubernetes/
cp kube-proxy.service /usr/lib/systemd/system/
# Change the IP addresses in kube-proxy.yaml to each host's own IP
for i in 3 4 5; do j=192.168.91.20$i; sed "s/192.168.91.202/$j/g" /etc/kubernetes/kube-proxy.yaml > kube-proxy.yaml; scp kube-proxy.kubeconfig kube-proxy.yaml $j:/etc/kubernetes/; scp kube-proxy.service $j:/usr/lib/systemd/system/; done
# On all master and worker nodes: create the directory and start the service
mkdir -p /var/lib/kube-proxy
systemctl daemon-reload
systemctl enable --now kube-proxy
systemctl status kube-proxy
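Since kube-proxy runs in ipvs mode here, service rules land in the kernel IPVS table. A quick check on any node (at this point only the kubernetes service exists, so 10.96.0.1:443 should list the three apiserver endpoints):
ipvsadm -Ln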
Deploying the Calico Network Component
wget https://docs.projectcalico.org/v3.19/manifests/calico.yaml
# Edit calico.yaml: uncomment lines 3683 and 3684 and set the Pod CIDR
3683 - name: CALICO_IPV4POOL_CIDR
3684 value: "10.244.0.0/16"
# Apply the manifest
kubectl apply -f calico.yaml
# Verify the result
kubectl get pods -A
NAMESPACE NAME READY STATUS RESTARTS AGE
kube-system calico-kube-controllers-7cc8dd57d9-7qmm2 1/1 Running 0 10m
kube-system calico-node-57xh2 1/1 Running 0 10m
kube-system calico-node-8844t 1/1 Running 0 10m
kube-system calico-node-9w62w 1/1 Running 0 10m
kube-system calico-node-mkjb8 1/1 Running 0 10m
kubectl get nodes
NAME STATUS ROLES AGE VERSION
master01 Ready <none> 33m v1.21.10
master02 Ready <none> 33m v1.21.10
master03 Ready <none> 33m v1.21.10
worker01 Ready <none> 40m v1.21.10
Deploying CoreDNS
cat > coredns.yaml << "EOF"
apiVersion: v1
kind: ServiceAccount
metadata:
name: coredns
namespace: kube-system
---
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRole
metadata:
labels:
kubernetes.io/bootstrapping: rbac-defaults
name: system:coredns
rules:
- apiGroups:
- ""
resources:
- endpoints
- services
- pods
- namespaces
verbs:
- list
- watch
- apiGroups:
- discovery.k8s.io
resources:
- endpointslices
verbs:
- list
- watch
---
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRoleBinding
metadata:
annotations:
rbac.authorization.kubernetes.io/autoupdate: "true"
labels:
kubernetes.io/bootstrapping: rbac-defaults
name: system:coredns
roleRef:
apiGroup: rbac.authorization.k8s.io
kind: ClusterRole
name: system:coredns
subjects:
- kind: ServiceAccount
name: coredns
namespace: kube-system
---
apiVersion: v1
kind: ConfigMap
metadata:
name: coredns
namespace: kube-system
data:
Corefile: |
.:53 {
errors
health {
lameduck 5s
}
ready
kubernetes cluster.local in-addr.arpa ip6.arpa {
fallthrough in-addr.arpa ip6.arpa
}
prometheus :9153
forward . /etc/resolv.conf {
max_concurrent 1000
}
cache 30
loop
reload
loadbalance
}
---
apiVersion: apps/v1
kind: Deployment
metadata:
name: coredns
namespace: kube-system
labels:
k8s-app: kube-dns
kubernetes.io/name: "CoreDNS"
spec:
# replicas: not specified here:
# 1. Default is 1.
# 2. Will be tuned in real time if DNS horizontal auto-scaling is turned on.
strategy:
type: RollingUpdate
rollingUpdate:
maxUnavailable: 1
selector:
matchLabels:
k8s-app: kube-dns
template:
metadata:
labels:
k8s-app: kube-dns
spec:
priorityClassName: system-cluster-critical
serviceAccountName: coredns
tolerations:
- key: "CriticalAddonsOnly"
operator: "Exists"
nodeSelector:
kubernetes.io/os: linux
affinity:
podAntiAffinity:
preferredDuringSchedulingIgnoredDuringExecution:
- weight: 100
podAffinityTerm:
labelSelector:
matchExpressions:
- key: k8s-app
operator: In
values: ["kube-dns"]
topologyKey: kubernetes.io/hostname
containers:
- name: coredns
image: coredns/coredns:1.8.4
imagePullPolicy: IfNotPresent
resources:
limits:
memory: 170Mi
requests:
cpu: 100m
memory: 70Mi
args: [ "-conf", "/etc/coredns/Corefile" ]
volumeMounts:
- name: config-volume
mountPath: /etc/coredns
readOnly: true
ports:
- containerPort: 53
name: dns
protocol: UDP
- containerPort: 53
name: dns-tcp
protocol: TCP
- containerPort: 9153
name: metrics
protocol: TCP
securityContext:
allowPrivilegeEscalation: false
capabilities:
add:
- NET_BIND_SERVICE
drop:
- all
readOnlyRootFilesystem: true
livenessProbe:
httpGet:
path: /health
port: 8080
scheme: HTTP
initialDelaySeconds: 60
timeoutSeconds: 5
successThreshold: 1
failureThreshold: 5
readinessProbe:
httpGet:
path: /ready
port: 8181
scheme: HTTP
dnsPolicy: Default
volumes:
- name: config-volume
configMap:
name: coredns
items:
- key: Corefile
path: Corefile
---
apiVersion: v1
kind: Service
metadata:
name: kube-dns
namespace: kube-system
annotations:
prometheus.io/port: "9153"
prometheus.io/scrape: "true"
labels:
k8s-app: kube-dns
kubernetes.io/cluster-service: "true"
kubernetes.io/name: "CoreDNS"
spec:
selector:
k8s-app: kube-dns
clusterIP: 10.96.0.2
ports:
- name: dns
port: 53
protocol: UDP
- name: dns-tcp
port: 53
protocol: TCP
- name: metrics
port: 9153
protocol: TCP
EOF
kubectl apply -f coredns.yaml
kubectl get pods -A
NAMESPACE NAME READY STATUS RESTARTS AGE
kube-system calico-kube-controllers-7cc8dd57d9-7qmm2 1/1 Running 0 13m
kube-system calico-node-57xh2 1/1 Running 0 13m
kube-system calico-node-8844t 1/1 Running 0 13m
kube-system calico-node-9w62w 1/1 Running 0 13m
kube-system calico-node-mkjb8 1/1 Running 0 13m
kube-system coredns-675db8b7cc-xdjlm 1/1 Running 0 85s
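A minimal DNS check (a sketch, using a throwaway busybox pod) confirms that CoreDNS resolves cluster services through the clusterDNS address 10.96.0.2:
kubectl run -it --rm dns-test --image=busybox:1.28 --restart=Never -- nslookup kubernetes.default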
Deploying an Application for Verification
cat > nginx.yaml << "EOF"
---
apiVersion: v1
kind: ReplicationController
metadata:
name: nginx-web
spec:
replicas: 2
selector:
name: nginx
template:
metadata:
labels:
name: nginx
spec:
containers:
- name: nginx
image: nginx:1.19.6
ports:
- containerPort: 80
---
apiVersion: v1
kind: Service
metadata:
name: nginx-service-nodeport
spec:
ports:
- port: 80
targetPort: 80
nodePort: 30001
protocol: TCP
type: NodePort
selector:
name: nginx
EOF
kubectl apply -f nginx.yaml
kubectl get all
NAME READY STATUS RESTARTS AGE
pod/nginx-web-rmccc 1/1 Running 0 85s
pod/nginx-web-wsf8k 1/1 Running 0 85s
NAME DESIRED CURRENT READY AGE
replicationcontroller/nginx-web 2 2 2 85s
NAME TYPE CLUSTER-IP EXTERNAL-IP PORT(S) AGE
service/kubernetes ClusterIP 10.96.0.1 <none> 443/TCP 4d
service/nginx-service-nodeport NodePort 10.96.103.158 <none> 80:30001/TCP 85s
# Accessible from every master and worker node
curl 192.168.91.202:30001
<!DOCTYPE html>
<html>
<head>
<title>Welcome to nginx!</title>
...
Kubernetes Cluster Node Management
This subsection covers worker node management; the results shown are from the docker-based setup.
Node Preparation
Following "Environment Preparation", prepare a worker02 node with IP 192.168.91.206. For passwordless login, only master01 to worker02 needs to be configured; the other nodes were set up earlier.
# Unless stated otherwise, the commands below run on master01 in /data/k8s-work
ssh-copy-id 192.168.91.206
cd kubernetes/server/bin/
scp kubelet kube-proxy 192.168.91.206:/usr/local/bin
cd -
for i in 2 3 4 5 6; do j=192.168.91.20$i; ssh $j 'echo "192.168.91.206 worker02" >> /etc/hosts'; done
ssh 192.168.91.206 "mkdir -p /etc/kubernetes/ssl;mkdir -p /var/lib/kubelet;mkdir -p /var/log/kubernetes"
# Deploy kubelet
sed "s/192.168.91.202/192.168.91.206/" /etc/kubernetes/kubelet.json > kubelet.json
scp kubelet-bootstrap.kubeconfig kubelet.json 192.168.91.206:/etc/kubernetes/
scp ca.pem 192.168.91.206:/etc/kubernetes/ssl/
scp kubelet.service 192.168.91.206:/usr/lib/systemd/system/
# worker02
systemctl daemon-reload
systemctl enable --now kubelet
systemctl status kubelet
# master01
kubectl get nodes
NAME STATUS ROLES AGE VERSION
master01 Ready <none> 16h v1.21.10
master02 Ready <none> 16h v1.21.10
master03 Ready <none> 16h v1.21.10
worker01 Ready <none> 17h v1.21.10
worker02 Ready <none> 5m59s v1.21.10
# Deploy kube-proxy
sed "s/192.168.91.202/192.168.91.206/g" /etc/kubernetes/kube-proxy.yaml > kube-proxy.yaml
scp kube-proxy.kubeconfig kube-proxy.yaml 192.168.91.206:/etc/kubernetes/
scp kube-proxy.service 192.168.91.206:/usr/lib/systemd/system/
# worker02
mkdir -p /var/lib/kube-proxy
systemctl daemon-reload
systemctl enable --now kube-proxy
systemctl status kube-proxy
# Verify
# master01
kubectl get pods -n kube-system -o wide
NAME READY STATUS RESTARTS AGE IP NODE NOMINATED NODE READINESS GATES
calico-kube-controllers-7cc8dd57d9-7qmm2 1/1 Running 2 16h 10.244.5.2 worker01 <none> <none>
calico-node-57xh2 1/1 Running 1 16h 192.168.91.203 master02 <none> <none>
calico-node-8844t 1/1 Running 1 16h 192.168.91.204 master03 <none> <none>
calico-node-9w62w 1/1 Running 1 16h 192.168.91.205 worker01 <none> <none>
calico-node-mkjb8 1/1 Running 1 16h 192.168.91.202 master01 <none> <none>
calico-node-qw77q 1/1 Running 0 8m58s 192.168.91.206 worker02 <none> <none>
coredns-675db8b7cc-xdjlm 1/1 Running 1 16h 10.244.59.194 master02 <none> <none>
kubectl label nodes worker02 deploy.type=nginxapp
node/worker02 labeled
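The label applied above can then be used to pin workloads to worker02 via a nodeSelector; a minimal sketch (this manifest is illustrative, not part of the original deployment):
cat > nginx-on-worker02.yaml << "EOF"
apiVersion: v1
kind: Pod
metadata:
  name: nginx-on-worker02
spec:
  nodeSelector:
    deploy.type: nginxapp
  containers:
  - name: nginx
    image: nginx:1.19.6
EOF
kubectl apply -f nginx-on-worker02.yaml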
curl http://192.168.91.206:30001
<!DOCTYPE html>
<html>
<head>
<title>Welcome to nginx!</title>
...