Kubernetes version: 1.28
Install containerd
# install containerd from the distribution repositories
sudo apt update && sudo apt install -y containerd
# verify the installed version
containerd -v
# generate the default containerd configuration.
# NOTE: the file lives under /etc, so both mkdir and the file write need root;
# a plain ">" redirection is performed by the user's shell and would fail,
# so write the file through "sudo tee" instead.
sudo mkdir -p /etc/containerd && \
containerd config default | sudo tee /etc/containerd/config.toml
# edit the containerd config: find "SystemdCgroup" and set it to true so
# containerd uses the systemd cgroup driver (required to match the kubelet).
# config.toml is TOML, so the assignment uses '=', not ':'
sudo vim /etc/containerd/config.toml
SystemdCgroup = true
# enable containerd at boot, then restart it so the edited config.toml
# (SystemdCgroup = true) takes effect — apt already started the service
# with the old configuration, so "enable --now" alone would not reload it
sudo systemctl enable --now containerd
sudo systemctl restart containerd
Configure the server time zone
# set the local time zone to Asia/Shanghai
sudo timedatectl set-timezone Asia/Shanghai
# restart the time synchronization service so it picks up the new zone
sudo systemctl restart systemd-timesyncd.service
# verify the clock, time zone and NTP sync status
timedatectl status
Disable the swap partition (required by kubelet)
# disable swap for the current boot only
sudo swapoff -a
# also disable swap permanently, otherwise kubelet will fail to start after a
# reboot: comment out every swap entry in /etc/fstab
sudo sed -i '/ swap / s/^/#/' /etc/fstab
Check SELinux status (on Ubuntu, SELinux is not enabled by default — AppArmor is used instead, so this is only a verification step)
# install policycoreutils, which provides the sestatus tool
sudo apt install -y policycoreutils
# check the status of SELinux; expected output is "SELinux status: disabled"
sestatus
Configure /etc/hosts (on every node)
sudo vi /etc/hosts
# add entries for the master and worker nodes (adjust the IPs to your environment)
192.168.64.8 master
192.168.64.9 work1
192.168.64.10 work2
Configure IPv4 forwarding and iptables bridge filtering
# declare the kernel modules required by container networking in
# /etc/modules-load.d/k8s.conf so they are loaded automatically on every boot
cat <<EOF | sudo tee /etc/modules-load.d/k8s.conf
overlay
br_netfilter
EOF
# load the modules immediately for the current boot
sudo modprobe overlay
sudo modprobe br_netfilter
# persist the sysctl parameters required by kubernetes networking
cat <<EOF | sudo tee /etc/sysctl.d/k8s.conf
net.bridge.bridge-nf-call-iptables = 1
net.bridge.bridge-nf-call-ip6tables = 1
net.ipv4.ip_forward = 1
EOF
# apply the sysctl settings now, without rebooting the server
sudo sysctl --system
# verify that both kernel modules are loaded
lsmod | grep br_netfilter
lsmod | grep overlay
# verify the sysctl parameters; all three values should be 1
sysctl net.bridge.bridge-nf-call-iptables net.bridge.bridge-nf-call-ip6tables net.ipv4.ip_forward
Install Kubernetes (kubelet, kubeadm, kubectl — on every node)
# update apt and install the packages needed to use the Kubernetes apt repository
sudo apt-get update
sudo apt-get install -y apt-transport-https ca-certificates curl gpg
# make sure the keyring directory exists — it is missing on releases older
# than Debian 12 / Ubuntu 22.04, and gpg -o would fail without it
sudo mkdir -p -m 755 /etc/apt/keyrings
# download the public signing key of the Kubernetes package repositories;
# the same key signs all versions, so the "stable:/v1.28" part of this URL
# does not pin you to a version
curl -fsSL https://pkgs.k8s.io/core:/stable:/v1.28/deb/Release.key | sudo gpg --dearmor -o /etc/apt/keyrings/kubernetes-apt-keyring.gpg
# add the Kubernetes apt repository; here you must choose the minor version you
# want, e.g. https://pkgs.k8s.io/core:/stable:/v1.28/deb/ ---> https://pkgs.k8s.io/core:/stable:/v1.32/deb/
echo 'deb [signed-by=/etc/apt/keyrings/kubernetes-apt-keyring.gpg] https://pkgs.k8s.io/core:/stable:/v1.28/deb/ /' | sudo tee /etc/apt/sources.list.d/kubernetes.list
# update apt, install kubelet/kubeadm/kubectl, then hold the packages so an
# unattended upgrade cannot break the version binding
sudo apt-get update
sudo apt-get install -y kubelet kubeadm kubectl
sudo apt-mark hold kubelet kubeadm kubectl
# check the installed kubeadm version
kubeadm version
Initialize the master node 【 run only on the master node 】
# pre-pull the control-plane images (api-server, controller-manager, scheduler,
# proxy, pause, etcd, coredns) from a mirror reachable from mainland China.
# NOTE: --kubernetes-version must match the kubeadm/kubelet version installed above
sudo kubeadm config images pull \
--image-repository=registry.aliyuncs.com/google_containers \
--kubernetes-version=v1.28.15 \
--cri-socket=unix:///run/containerd/containerd.sock
# finally, initialize the cluster (again: master node only).
#   --apiserver-advertise-address : must be this master node's IP
#   --kubernetes-version          : must match the installed kubeadm version
#   --service-cidr                : IP range used for Services
#   --pod-network-cidr            : IP range used for Pods — must match the
#                                   CNI plugin configuration applied later
sudo kubeadm init \
--apiserver-advertise-address=192.168.64.8 \
--image-repository=registry.aliyuncs.com/google_containers \
--kubernetes-version=v1.28.15 \
--service-cidr=10.96.0.0/12 \
--pod-network-cidr=10.10.0.0/16 \
--cri-socket=unix:///run/containerd/containerd.sock
At this point the installation of Kubernetes is almost complete — just follow the instructions printed by "kubeadm init":
# To start using your cluster, run the following as a regular user
# (copies the admin kubeconfig into your home directory):
mkdir -p $HOME/.kube
sudo cp -i /etc/kubernetes/admin.conf $HOME/.kube/config
sudo chown $(id -u):$(id -g) $HOME/.kube/config
# Alternatively, if you are the root user, you can run:
export KUBECONFIG=/etc/kubernetes/admin.conf
# Then join worker nodes by running the following on each worker, as root.
# The token and hash are printed by "kubeadm init" — yours will differ:
kubeadm join 192.168.64.8:6443 --token 2qz4xc.t2ioe708votugm7t \
--discovery-token-ca-cert-hash sha256:5544af15c1db2013a607aaa197e3c378659a1b592541abc9d5dac65f30bd494d
# from the master, verify that all nodes have registered
kubectl get nodes -o wide
Install the Calico network plugin 【 optional 】
# install the Calico operator
kubectl create -f https://raw.githubusercontent.com/projectcalico/calico/v3.27.2/manifests/tigera-operator.yaml
# download custom-resources.yaml
curl -LO https://raw.githubusercontent.com/projectcalico/calico/v3.27.2/manifests/custom-resources.yaml
# set the pod network range to the --pod-network-cidr chosen at kubeadm init
# (dots are escaped so sed matches the literal default address 192.168.0.0)
sed -i 's/cidr: 192\.168\.0\.0/cidr: 10.10.0.0/g' custom-resources.yaml
# create the Calico custom resources
kubectl create -f custom-resources.yaml
# keep watching until every pod in calico-system is Running
watch kubectl get all -o wide -n calico-system
Install the Flannel network plugin 【 recommended 】
# make sure curl is installed
sudo apt-get update
sudo apt-get install -y curl
# download the manifest; -L is required because GitHub
# "releases/latest/download" URLs answer with an HTTP redirect —
# without it curl saves the redirect response instead of the file
curl -LO https://github.com/flannel-io/flannel/releases/latest/download/kube-flannel.yml
# edit kube-flannel.yml so "Network" matches the --pod-network-cidr used at kubeadm init:
net-conf.json: |
{
"Network": "10.10.0.0/16",
"Backend": {
"Type": "vxlan"
}
}
# apply the manifest
kubectl apply -f kube-flannel.yml
Install Kuboard (web dashboard)
# download kuboard-v3-swr.yaml
wget https://addons.kuboard.cn/kuboard/kuboard-v3-swr.yaml
# edit kuboard-v3-swr.yaml — the fragment below pins kuboard onto the master
# node (YAML indentation was lost when these notes were taken):
spec:
nodeName: master # set this to your master node's name
affinity:
nodeAffinity:
preferredDuringSchedulingIgnoredDuringExecution:
- preference:
matchExpressions:
- key: node-role.kubernetes.io/master
operator: Exists
weight: 100
- preference:
matchExpressions:
- key: node-role.kubernetes.io/control-plane
operator: Exists
weight: 100
# apply the manifest
kubectl apply -f kuboard-v3-swr.yaml
# watch the rollout progress
watch kubectl get pod -n kuboard -o wide
# then configure kuboard in its web UI: supply an apikey and the apiserver address
apikey
apiserver address: https://192.168.64.8:6443   (use your master node's IP — must match --apiserver-advertise-address)
Q & A
- After a worker node runs " kubeadm join 192.168.10.210:6443 --token tv9mkx.tw7it9vphe158e74
  --discovery-token-ca-cert-hash sha256:e8721b8630d5b562e23c010c70559a6d3084f629abad6a2920e87855f8fb96f3 ",
  running " kubectl get pods " on that worker fails, because worker nodes have no kubeconfig:
-
/etc/kubernetes/admin.conf 是 Master 节点初始化时生成的配置文件,包含以下关键信息:
apiVersion: v1
kind: Config
clusters:
- cluster:
    certificate-authority-data: <CA 证书>   # 集群的根证书:校验证书链,确保连接的 API 服务器是受信任的
    server: https://192.168.10.210:6443     # API 服务器的地址和端口
  name: kubernetes
contexts:
- context:
    cluster: kubernetes
    user: kubernetes-admin
  name: kubernetes-admin@kubernetes
current-context: kubernetes-admin@kubernetes
users:
- name: kubernetes-admin
  user:
    client-certificate-data: <客户端证书>   # 管理员身份证书:向 API 服务器证明操作权限,允许执行 kubectl get pods 等命令
    client-key-data: <客户端私钥>           # 管理员私钥
安全设置
kubectl 的配置文件包含敏感信息(如管理员私钥)。如果自动复制到所有 Worker 节点,会增加密钥泄露风险。
Worker 节点通常不需要执行管理操作,按需配置更符合最小权限原则。 -
职责分离
kubeadm 的职责是初始化集群和加入节点,而 kubectl 的配置属于用户层面的操作。
生产环境中,通常只在运维终端或 CI/CD 系统上配置 kubectl,而非所有节点。
- 获取加入master新token
kubeadm token create --print-join-command