初始环境准备
# --- Initial environment preparation (run on every node) ---
# Disable swap: kubelet refuses to start when swap is enabled.
swapoff -a
# Comment out swap entries in fstab so swap stays off after reboot.
# NOTE(review): re-running re-comments already-commented lines (adds another '#') — harmless but noisy.
sed -i 's/.*swap.*/#&/' /etc/fstab
# Disable SELinux: permissive immediately, disabled permanently after reboot.
setenforce 0
# Both paths are patched because /etc/sysconfig/selinux is usually a symlink
# to /etc/selinux/config, but not on every distribution.
sed -i "s/^SELINUX=enforcing/SELINUX=disabled/g" /etc/sysconfig/selinux
sed -i "s/^SELINUX=enforcing/SELINUX=disabled/g" /etc/selinux/config
sed -i "s/^SELINUX=permissive/SELINUX=disabled/g" /etc/sysconfig/selinux
sed -i "s/^SELINUX=permissive/SELINUX=disabled/g" /etc/selinux/config
# Stop and disable firewalld so node-to-node and pod traffic is not blocked.
systemctl stop firewalld.service
systemctl disable firewalld.service
# Load IPVS kernel modules (for kube-proxy's IPVS mode).
# NOTE(review): the original comment said "install iptables" — these modprobe
# calls actually load IPVS modules; nothing is installed here.
modprobe -- ip_vs
modprobe -- ip_vs_rr
modprobe -- ip_vs_wrr
modprobe -- ip_vs_sh
modprobe -- nf_conntrack
# Verify the modules are loaded.
lsmod | grep ip_vs
# Pass bridged IPv4/IPv6 traffic to iptables chains and enable IP forwarding.
# Load br_netfilter first so the net.bridge.* sysctls below exist.
modprobe br_netfilter
# Use '>' (not '>>') so re-running this snippet does not append duplicate
# entries; the quoted delimiter makes the here-doc literal (no expansion).
cat > /etc/sysctl.d/k8s.conf <<'OFF'
net.bridge.bridge-nf-call-ip6tables = 1
net.bridge.bridge-nf-call-iptables = 1
net.ipv4.ip_forward = 1
OFF
# Apply and echo the settings.
sysctl -p /etc/sysctl.d/k8s.conf
概述
- 以下步骤在所有节点(master 和 worker)上执行:每个节点都要安装 docker 和 kubelet 服务
# Switch the Rocky Linux base repos to the Aliyun mirror (keeps .bak backups).
sed -e 's|^mirrorlist=|#mirrorlist=|g' \
-e 's|^#baseurl=http://dl.rockylinux.org/$contentdir|baseurl=https://mirrors.aliyun.com/rockylinux|g' \
-i.bak \
/etc/yum.repos.d/Rocky-*.repo
dnf makecache
# Docker: prerequisites, Aliyun docker-ce repo, then the engine.
yum install -y yum-utils device-mapper-persistent-data lvm2 wget curl net-tools vim
yum-config-manager --add-repo https://mirrors.aliyun.com/docker-ce/linux/centos/docker-ce.repo
yum makecache
yum -y install docker-ce docker-ce-cli containerd.io
# Create the Docker daemon config file.
# NOTE(review): "insecure-registries": ["0.0.0.0/0"] disables TLS verification
# for EVERY registry — a security hazard; narrow it to your private registry
# address/CIDR only.
# "native.cgroupdriver=systemd" must match the kubelet's cgroup driver,
# otherwise kubelet fails to start.
mkdir -p /etc/docker
tee /etc/docker/daemon.json <<EOF
{
"registry-mirrors" : [
"https://registry.cn-shenzhen.aliyuncs.com",
"https://registry.docker-cn.com"
],
"data-root":"/www/data/docker_run",
"max-concurrent-downloads": 10,
"max-concurrent-uploads": 10,
"insecure-registries" : ["0.0.0.0/0"],
"exec-opts": ["native.cgroupdriver=systemd"],
"log-driver": "json-file",
"log-opts": {
"max-size": "100m",
"max-file": "3"
},
"storage-driver": "overlay2"
}
EOF
systemctl enable docker
systemctl start docker
# Sanity check.
docker version
# --- Kubernetes packages (Aliyun mirror repo) ---
cat <<EOF > /etc/yum.repos.d/kubernetes.repo
[kubernetes]
name=Kubernetes
baseurl=https://mirrors.aliyun.com/kubernetes/yum/repos/kubernetes-el7-x86_64/
enabled=1
gpgcheck=1
repo_gpgcheck=1
gpgkey=https://mirrors.aliyun.com/kubernetes/yum/doc/yum-key.gpg https://mirrors.aliyun.com/kubernetes/yum/doc/rpm-package-key.gpg
EOF
setenforce 0
# Browse available versions, then install the pinned version directly.
# (The original first installed unpinned kubelet/kubeadm/kubectl — pulling the
# latest release — and then ran a second, interactive install of 1.21.1-0,
# which becomes a downgrade prompt. Install the pinned versions once, with -y.)
yum --showduplicates list kubeadm
yum install -y kubelet-1.21.1-0 kubeadm-1.21.1-0 kubectl-1.21.1-0
# Only needed when upgrading an EXISTING cluster to this version:
# kubeadm upgrade apply v1.21.1
systemctl enable kubelet && systemctl start kubelet
# Initialize the control plane (run on the master only).
# NOTE(review): --kubernetes-version=v1.23.4 does not match the 1.21.1-0
# packages installed above — pass the version that matches your kubelet.
# NOTE(review): --apiserver-advertise-address is 192.168.101.71 here but the
# join command below targets 10.0.2.170 — confirm which address is current.
kubeadm init --apiserver-advertise-address=192.168.101.71 \
--image-repository=registry.aliyuncs.com/google_containers \
--kubernetes-version=v1.23.4 \
--service-cidr=10.96.0.0/16 \
--pod-network-cidr=10.244.0.0/16
# Install the flannel CNI plugin.
wget https://raw.githubusercontent.com/flannel-io/flannel/master/Documentation/kube-flannel.yml
# After downloading the yml, add a NIC binding line below "- --kube-subnet-mgr":
#   - --iface=eth0
# (adjust to your interface name; the original note had the typo "enth0"),
# then apply the manifest.
kubectl apply -f kube-flannel.yml
# Join worker nodes: print a fresh join command on the master, run it on each worker.
kubeadm token create --print-join-command
# NOTE(review): this recorded hash looks truncated (a sha256 is 64 hex chars);
# regenerate the full command with 'kubeadm token create --print-join-command'.
kubeadm join 10.0.2.170:6443 --token 61aox7.y73n3kw5cpcwc7dd --discovery-token-ca-cert-hash sha256:c5e187ecf52005d09a399efab99e40b14014fab4a201edd32cd
# Inspect kubelet logs when a node fails to come up.
journalctl -xeu kubelet
问题1:kubelet 启动失败,日志报 "Failed to run kubelet" err="failed to run Kubelet: misconfiguration: kubelet cgroup driver 与 docker 的 cgroup driver 不一致"
解决:修改 docker 配置文件,将 cgroup driver 设为 systemd,然后重启 docker
# WARNING(review): 'cat >' OVERWRITES the fuller daemon.json written earlier
# (data-root, log-opts, insecure-registries, etc. are lost). Prefer editing
# the existing file and only ensuring "exec-opts": ["native.cgroupdriver=systemd"]
# is present.
cat > /etc/docker/daemon.json <<EOF
{
"registry-mirrors": ["https://registry.cn-shenzhen.aliyuncs.com"],
"exec-opts": ["native.cgroupdriver=systemd"]
}
EOF
systemctl restart docker
问题2:Node节点一直处于NotReady状态
原因:Pod网络插件没安装好
# Install the Pod network plugin (CNI).
# NOTE(review): the coreos/flannel repo has moved to flannel-io/flannel (used
# earlier in these notes) — this URL may 404; prefer the flannel-io path.
kubectl apply -f https://raw.githubusercontent.com/coreos/flannel/master/Documentation/kube-flannel.yml
问题3:flannel 组件 pod 出现 CrashLoopBackOff
原因:SELinux 处于 enforcing 状态,阻止了与本机的交互,临时切换为 permissive 即可
setenforce 0
# 本地域名解析,将以下条目追加到客户端的 /etc/hosts:
119.23.168.223 tekton.intbee.com
# Create a simple Ingress routing www.demo.io/ to service demo:80.
kubectl create ingress demo --class=nginx --rule www.demo.io/=demo:80
# Expose the Tekton dashboard (service tekton-dashboard:9097) via ingress-nginx.
kubectl create ingress tekton-dashboard --class=nginx --rule=tekton.intbee.com/*=tekton-dashboard:9097
# Local test without a LoadBalancer: forward local port 8080 to the controller's port 80.
kubectl port-forward --namespace=ingress-nginx service/ingress-nginx-controller 8080:80
# Install the ingress-nginx controller — either via Helm ...
helm upgrade --install ingress-nginx ingress-nginx \
--repo https://kubernetes.github.io/ingress-nginx \
--namespace ingress-nginx --create-namespace
# ... or via the static manifest (pick ONE of the two, not both).
kubectl apply -f https://raw.githubusercontent.com/kubernetes/ingress-nginx/controller-v1.1.1/deploy/static/provider/cloud/deploy.yaml
# Legacy Ingress (extensions/v1beta1) — this API was removed in Kubernetes
# v1.22; kept here only for reference on older clusters. On 1.19+ use the
# networking.k8s.io/v1 form instead.
apiVersion: extensions/v1beta1
kind: Ingress
metadata:
  name: tekton-service
  namespace: tekton-pipelines
  annotations:
    kubernetes.io/ingress.class: nginx
    # Allow large request bodies (e.g. artifact uploads) through nginx.
    nginx.ingress.kubernetes.io/proxy-body-size: 256m
spec:
  rules:
  - host: tekton.intbee.com
    http:
      paths:
      - path: /
        backend:
          serviceName: tekton-dashboard
          servicePort: 9097
---
# Current Ingress form (networking.k8s.io/v1, Kubernetes v1.19+):
# routes tekton.intbee.com/ to service tekton-dashboard:9097.
apiVersion: networking.k8s.io/v1
kind: Ingress
metadata:
  name: ingress-tekton
  namespace: tekton-pipelines
  annotations:
    # kubernetes.io/ingress.class is deprecated in favor of
    # spec.ingressClassName; keeping both is redundant but harmless.
    kubernetes.io/ingress.class: nginx
    nginx.ingress.kubernetes.io/proxy-body-size: 256m
spec:
  ingressClassName: nginx
  rules:
  - host: tekton.intbee.com
    http:
      paths:
      - path: /
        pathType: Prefix
        backend:
          service:
            name: tekton-dashboard
            port:
              number: 9097
kubectl expose deployment ingress-tekton --type=LoadBalancer --name=lb-ingress-tekton -n tekton-pipelines
服务器更换静态ip后重新配置master和node节点
# Replace the old IP with the new one in etcd.yaml and kube-apiserver.yaml.
# Back up both manifests with a timestamp suffix first.
cp /etc/kubernetes/manifests/etcd.yaml /etc/kubernetes/manifests/etcd.yaml.$(date "+%Y%m%d_%H%M%S")
# BUG FIX: the original copied etcd.yaml under the kube-apiserver backup name,
# leaving kube-apiserver.yaml with no backup at all.
cp /etc/kubernetes/manifests/kube-apiserver.yaml /etc/kubernetes/manifests/kube-apiserver.yaml.$(date "+%Y%m%d_%H%M%S")
sed -i "s/10.1.3.0/10.0.2.170/g" /etc/kubernetes/manifests/etcd.yaml
sed -i "s/10.1.3.0/10.0.2.170/g" /etc/kubernetes/manifests/kube-apiserver.yaml
# Regenerate the admin kubeconfig for the new advertise address.
mv /etc/kubernetes/admin.conf /etc/kubernetes/admin.conf.$(date "+%Y%m%d_%H%M%S")
kubeadm init phase kubeconfig admin --apiserver-advertise-address 10.0.2.170
# Move the old apiserver cert/key aside and regenerate them for the new IP
# (kubeadm refuses to overwrite existing certs).
mv /etc/kubernetes/pki/apiserver.key /etc/kubernetes/pki/apiserver.key.$(date "+%Y%m%d_%H%M%S")
mv /etc/kubernetes/pki/apiserver.crt /etc/kubernetes/pki/apiserver.crt.$(date "+%Y%m%d_%H%M%S")
kubeadm init phase certs apiserver --apiserver-advertise-address 10.0.2.170
# Restart docker and kubelet so the static pods pick up the new manifests.
systemctl restart docker
systemctl restart kubelet
# Install the admin kubeconfig for kubectl; create ~/.kube first — the
# original cp fails on a fresh home directory without it.
mkdir -p ~/.kube
cp /etc/kubernetes/admin.conf ~/.kube/config