Three machines: 1 master, 2 nodes
Kernel upgrade
systemctl stop firewalld
systemctl disable firewalld
vim /etc/selinux/config
# set SELINUX=disabled
yum -y update
yum remove kernel-tools kernel-headers kernel-tools-libs
yum localinstall -y kernel-lt-*.rpm
awk -F\' '$1=="menuentry " {print $2}' /etc/grub2.cfg
sudo grub2-set-default 0
sudo grub2-mkconfig -o /boot/grub2/grub.cfg
reboot
uname -r
Kernel tuning parameters
cat >/etc/sysctl.d/k8s-sysctl.conf << EOF
# bcs config begin
# Maximum listen queue length per port. A global parameter; the default of 128 is too small, 32768 matches other vendors
net.core.somaxconn=32768
# Reuse TIME-WAIT ports, useful with large numbers of short-lived connections
net.ipv4.tcp_tw_reuse=1
# TCP half-open (SYN) queue length. Too small a value gets client connections rejected under high concurrency
net.ipv4.tcp_max_syn_backlog=8096
# Per-user limit on the number of inotify instances
fs.inotify.max_user_instances=8192
# Total limit on inotify watches. Raise it to avoid "Too many open files" errors
fs.inotify.max_user_watches=524288
# Required for BPF
net.core.bpf_jit_enable=1
# Required for BPF
net.core.bpf_jit_harden=1
# Required for BPF
net.core.bpf_jit_kallsyms=1
# Tunes the maximum number of packets the kernel may take from the driver queue per rx softirq cycle, effective per CPU; computed as (dev_weight * dev_weight_tx_bias). Mainly used to rebalance network-stack CPU time between rx and tx
net.core.dev_weight_tx_bias=1
# Maximum socket receive buffer size
net.core.rmem_max=16777216
# RPS steers rx softirq processing onto suitable CPU cores to improve overall network performance. This sets the RPS flow table size
net.core.rps_sock_flow_entries=8192
# Maximum socket send buffer size
net.core.wmem_max=16777216
# Avoids "neighbor table overflow" errors (seen in a real customer case: more than 1024 nodes plus an application that talks to all of them)
net.ipv4.neigh.default.gc_thresh1=2048
# Same as above
net.ipv4.neigh.default.gc_thresh2=8192
# Same as above
net.ipv4.neigh.default.gc_thresh3=16384
# Orphan sockets are ones the application has already closed but the TCP stack has not yet released (excluding TIME_WAIT and CLOSE_WAIT). Raise this to avoid "Out of socket memory" errors under load. 32768 matches other vendors.
net.ipv4.tcp_max_orphans=32768
# Proxies such as nginx produce large numbers of TIME_WAIT sockets. Raise this to avoid "TCP: time wait bucket table overflow" errors.
net.ipv4.tcp_max_tw_buckets=16384
# TCP socket receive buffer sizes (min default max). Too small hurts TCP throughput
net.ipv4.tcp_rmem=4096 12582912 16777216
# TCP socket send buffer sizes (min default max). Too small hurts TCP throughput
net.ipv4.tcp_wmem=4096 12582912 16777216
# Limits the number of virtual memory areas per process address space
vm.max_map_count=262144
# Must be enabled for Kubernetes Services to work
net.ipv4.ip_forward=1
# On Ubuntu this defaults to "/usr/share/apport/apport %p %s %c %P", which prevents core files from being written inside containers
kernel.core_pattern=core
# Lets the kernel panic on soft lockups or infinite loops; the default is 0.
kernel.softlockup_panic=0
# Makes iptables apply to traffic crossing bridges
net.bridge.bridge-nf-call-ip6tables=1
net.bridge.bridge-nf-call-iptables=1
# Global upper bound on PID values.
kernel.pid_max=4194304
# Global limit on the number of threads, derived from memory size: TOTAL_MEM is total system memory in bytes, THREAD_SIZE defaults to 16 (KB).
kernel.threads-max=14556
# Global limit on file descriptors (including sockets), derived from memory size (TOTAL_MEM is total memory in bytes). Raise it to avoid "Too many open files" errors.
fs.file-max=186325
# bcs config end
EOF
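The bridge-nf keys above only exist once br_netfilter is loaded, and the file itself is never applied by the steps above; a minimal sketch of the missing step, assuming the path used in the heredoc:
# Load the modules the sysctl keys depend on (overlay is needed by containerd later)
modprobe overlay
modprobe br_netfilter
# Persist the modules across reboots
cat > /etc/modules-load.d/k8s.conf << EOF
overlay
br_netfilter
EOF
# Apply everything under /etc/sysctl.d, including k8s-sysctl.conf
sysctl --system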
## ulimit settings
cat > /etc/security/limits.d/bcs-limits.conf << EOF
# bcs config begin
* soft nproc 1028546
* hard nproc 1028546
* soft nofile 204800
* hard nofile 204800
# bcs config end
EOF
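limits.d settings only affect new login sessions; a quick check after logging in again, with the expected values mirroring the file above:
ulimit -n   # expect 204800 (nofile)
ulimit -u   # expect 1028546 (nproc)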
Basic configuration
# Set the hostname (use node-1 / node-2 on the workers)
hostnamectl set-hostname master
cat >> /etc/hosts << EOF
192.168.1.223 master
192.168.1.224 node-1
192.168.1.225 node-2
EOF
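The hostname and /etc/hosts steps have to be repeated on each machine; a sketch of pushing the hosts file out from the master, assuming root SSH access to the nodes (the loop itself is ours):
for h in node-1 node-2; do
  scp /etc/hosts root@${h}:/etc/hosts   # node-1/node-2 resolve via the entries just added
done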
# Disable the firewall
systemctl stop firewalld && systemctl disable firewalld
# Disable SELinux
setenforce 0 # temporary
sed -i 's/^SELINUX=enforcing$/SELINUX=permissive/' /etc/selinux/config # permanent
# Disable swap
swapoff -a
sed -ri 's/.*swap.*/#&/' /etc/fstab
# Partition the data disk and mount it
fdisk /dev/sdb # interactive input: n (new partition), p (primary), w (write table)
mkfs.ext4 /dev/sdb1
mkdir /data && mount /dev/sdb1 /data
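The mount above does not survive a reboot; a sketch of persisting it via /etc/fstab, using the partition UUID so the entry stays stable:
# Append an fstab entry for the new data partition
echo "UUID=$(blkid -s UUID -o value /dev/sdb1) /data ext4 defaults 0 0" >> /etc/fstab
mount -a   # errors here mean the entry is wrong; fix before rebooting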
containerd
# Install containerd
wget https:
tar Cxzvf /usr/local containerd-1.7.22-linux-amd64.tar.gz
# systemd unit file
wget -O /etc/systemd/system/containerd.service https:
cat /etc/systemd/system/containerd.service
[Unit]
Description=containerd container runtime
Documentation=https:
After=network.target local-fs.target
[Service]
ExecStartPre=-/sbin/modprobe overlay
ExecStart=/usr/local/bin/containerd
Type=notify
Delegate=yes
KillMode=process
Restart=always
RestartSec=5
# Having non-zero Limit*s causes performance problems due to accounting overhead
# in the kernel. We recommend using cgroups to do container-local accounting.
LimitNPROC=infinity
LimitCORE=infinity
# Comment out TasksMax if your systemd version does not support it.
# Only systemd 226 and above support this option.
TasksMax=infinity
OOMScoreAdjust=-999
[Install]
WantedBy=multi-user.target
# Start containerd
systemctl daemon-reload
systemctl enable --now containerd
## Installing runc
wget https:
install -m 755 runc.amd64 /usr/local/sbin/runc
# Configure containerd
# Create the containerd config directory
mkdir /etc/containerd
# Generate the default config file
containerd config default > /etc/containerd/config.toml
# Switch to a China-local registry mirror
sed -i 's/registry.k8s.io/registry.aliyuncs.com\/google_containers/' /etc/containerd/config.toml
# Set SystemdCgroup to true
[plugins."io.containerd.grpc.v1.cri".containerd.runtimes.runc.options]
SystemdCgroup = true
# Restart the service
systemctl daemon-reload
systemctl restart containerd
# Registry mirrors
mkdir -p /etc/containerd/certs.d/docker.io /etc/containerd/certs.d/registry.k8s.io
cat /etc/containerd/certs.d/docker.io/hosts.toml
server = "https://docker.io"
[host."https://docker.m.daocloud.io"]
  capabilities = ["pull", "resolve"]
[host."https://reg-mirror.qiniu.com"]
  capabilities = ["pull", "resolve"]
cat /etc/containerd/certs.d/registry.k8s.io/hosts.toml
server = "https://registry.k8s.io"
[host."https://k8s.m.daocloud.io"]
  capabilities = ["pull", "resolve", "push"]
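For containerd to read these hosts.toml files, the CRI registry section must point at the certs.d directory; a sketch, assuming the stock config.toml generated earlier (which ships with an empty config_path):
sed -i 's|config_path = ""|config_path = "/etc/containerd/certs.d"|' /etc/containerd/config.toml
systemctl restart containerd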
Install kubeadm, kubelet, and kubectl
# Add the Kubernetes yum repository
cat <<EOF | sudo tee /etc/yum.repos.d/kubernetes.repo
[kubernetes]
name=Kubernetes
baseurl=https:
enabled=1
gpgcheck=1
gpgkey=https:
exclude=kubelet kubeadm kubectl cri-tools kubernetes-cni
EOF
# Install kubelet, kubeadm, and kubectl, and enable kubelet so it starts on boot
sudo yum install -y kubelet-1.31.6 kubeadm-1.31.6 kubectl-1.31.6 --disableexcludes=kubernetes
sudo systemctl enable --now kubelet
Creating the cluster with kubeadm
kubeadm config images pull --image-repository registry.aliyuncs.com/google_containers --kubernetes-version v1.31.6
Initialize the control plane node
# Generate the default init configuration
kubeadm config print init-defaults > /etc/kubernetes/init-default.yaml
# Switch to the Aliyun mirror
sed -i 's/registry.k8s.io/registry.aliyuncs.com\/google_containers/' /etc/kubernetes/init-default.yaml
# Set the API server address; replace 192.168.1.223 with your own host IP
sed -i 's/1.2.3.4/192.168.1.223/' /etc/kubernetes/init-default.yaml
# File contents
[root@master ~]# cat /etc/kubernetes/init-default.yaml
apiVersion: kubeadm.k8s.io/v1beta4
bootstrapTokens:
- groups:
- system:bootstrappers:kubeadm:default-node-token
token: abcdef.0123456789abcdef
ttl: 24h0m0s
usages:
- signing
- authentication
kind: InitConfiguration
localAPIEndpoint:
  advertiseAddress: 192.168.1.223 # master address
bindPort: 6443
nodeRegistration:
criSocket: unix:
imagePullPolicy: IfNotPresent
imagePullSerial: true
name: node
taints: null
timeouts:
controlPlaneComponentHealthCheck: 4m0s
discovery: 5m0s
etcdAPICall: 2m0s
kubeletHealthCheck: 4m0s
kubernetesAPICall: 1m0s
tlsBootstrap: 5m0s
upgradeManifests: 5m0s
---
apiServer: {}
apiVersion: kubeadm.k8s.io/v1beta4
caCertificateValidityPeriod: 87600h0m0s
certificateValidityPeriod: 8760h0m0s
certificatesDir: /etc/kubernetes/pki
clusterName: kubernetes
controllerManager: {}
dns: {}
encryptionAlgorithm: RSA-2048
etcd:
local:
dataDir: /var/lib/etcd
imageRepository: registry.aliyuncs.com/google_containers
kind: ClusterConfiguration
kubernetesVersion: 1.31.0
networking:
dnsDomain: cluster.local
serviceSubnet: 10.96.0.0/12
  podSubnet: 192.168.0.0/16 # add this line
proxy: {}
scheduler: {}
# Initialize the control plane (to use the file prepared above instead, run: kubeadm init --config /etc/kubernetes/init-default.yaml)
kubeadm init --image-repository registry.aliyuncs.com/google_containers
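On success, kubeadm init prints the remaining steps itself; a sketch of the usual follow-up, with the join token and CA hash taken from that output rather than invented here:
# Make kubectl usable on the master
mkdir -p $HOME/.kube
cp -i /etc/kubernetes/admin.conf $HOME/.kube/config
chown $(id -u):$(id -g) $HOME/.kube/config
# On node-1 and node-2, run the join command printed by kubeadm init:
# kubeadm join 192.168.1.223:6443 --token <token> --discovery-token-ca-cert-hash sha256:<hash>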
Install the Calico network plugin
ctr images pull swr.cn-north-4.myhuaweicloud.com/ddn-k8s/docker.io/calico/cni:v3.28.2
ctr images tag swr.cn-north-4.myhuaweicloud.com/ddn-k8s/docker.io/calico/cni:v3.28.2 docker.io/calico/cni:v3.28.2
ctr images pull swr.cn-north-4.myhuaweicloud.com/ddn-k8s/docker.io/calico/node:v3.28.2
ctr images tag swr.cn-north-4.myhuaweicloud.com/ddn-k8s/docker.io/calico/node:v3.28.2 docker.io/calico/node:v3.28.2
ctr images pull swr.cn-north-4.myhuaweicloud.com/ddn-k8s/docker.io/calico/kube-controllers:v3.28.2
ctr images tag swr.cn-north-4.myhuaweicloud.com/ddn-k8s/docker.io/calico/kube-controllers:v3.28.2 docker.io/calico/kube-controllers:v3.28.2
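Two hedges on the pulls above: ctr works in its default namespace unless told otherwise, so for the kubelet (which looks in the k8s.io namespace) to see these images the pull/tag commands need -n k8s.io; and the Calico manifest itself still has to be applied. A sketch, assuming the upstream v3.28.2 manifest path:
kubectl apply -f https://raw.githubusercontent.com/projectcalico/calico/v3.28.2/manifests/calico.yaml
kubectl -n kube-system get pods -l k8s-app=calico-node   # wait for Running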
Install ingress-nginx
apiVersion: v1
kind: Namespace
metadata:
labels:
app.kubernetes.io/instance: ingress-nginx
app.kubernetes.io/name: ingress-nginx
name: ingress-nginx
---
apiVersion: v1
automountServiceAccountToken: true
kind: ServiceAccount
metadata:
labels:
app.kubernetes.io/component: controller
app.kubernetes.io/instance: ingress-nginx
app.kubernetes.io/name: ingress-nginx
app.kubernetes.io/part-of: ingress-nginx
app.kubernetes.io/version: 1.12.0
name: ingress-nginx
namespace: ingress-nginx
---
apiVersion: v1
automountServiceAccountToken: true
kind: ServiceAccount
metadata:
labels:
app.kubernetes.io/component: admission-webhook
app.kubernetes.io/instance: ingress-nginx
app.kubernetes.io/name: ingress-nginx
app.kubernetes.io/part-of: ingress-nginx
app.kubernetes.io/version: 1.12.0
name: ingress-nginx-admission
namespace: ingress-nginx
---
apiVersion: rbac.authorization.k8s.io/v1
kind: Role
metadata:
labels:
app.kubernetes.io/component: controller
app.kubernetes.io/instance: ingress-nginx
app.kubernetes.io/name: ingress-nginx
app.kubernetes.io/part-of: ingress-nginx
app.kubernetes.io/version: 1.12.0
name: ingress-nginx
namespace: ingress-nginx
rules:
- apiGroups:
- ""
resources:
- namespaces
verbs:
- get
- apiGroups:
- ""
resources:
- configmaps
- pods
- secrets
- endpoints
verbs:
- get
- list
- watch
- apiGroups:
- ""
resources:
- services
verbs:
- get
- list
- watch
- apiGroups:
- networking.k8s.io
resources:
- ingresses
verbs:
- get
- list
- watch
- apiGroups:
- networking.k8s.io
resources:
- ingresses/status
verbs:
- update
- apiGroups:
- networking.k8s.io
resources:
- ingressclasses
verbs:
- get
- list
- watch
- apiGroups:
- coordination.k8s.io
resourceNames:
- ingress-nginx-leader
resources:
- leases
verbs:
- get
- update
- apiGroups:
- coordination.k8s.io
resources:
- leases
verbs:
- create
- apiGroups:
- ""
resources:
- events
verbs:
- create
- patch
- apiGroups:
- discovery.k8s.io
resources:
- endpointslices
verbs:
- list
- watch
- get
---
apiVersion: rbac.authorization.k8s.io/v1
kind: Role
metadata:
labels:
app.kubernetes.io/component: admission-webhook
app.kubernetes.io/instance: ingress-nginx
app.kubernetes.io/name: ingress-nginx
app.kubernetes.io/part-of: ingress-nginx
app.kubernetes.io/version: 1.12.0
name: ingress-nginx-admission
namespace: ingress-nginx
rules:
- apiGroups:
- ""
resources:
- secrets
verbs:
- get
- create
---
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRole
metadata:
labels:
app.kubernetes.io/instance: ingress-nginx
app.kubernetes.io/name: ingress-nginx
app.kubernetes.io/part-of: ingress-nginx
app.kubernetes.io/version: 1.12.0
name: ingress-nginx
rules:
- apiGroups:
- ""
resources:
- configmaps
- endpoints
- nodes
- pods
- secrets
- namespaces
verbs:
- list
- watch
- apiGroups:
- coordination.k8s.io
resources:
- leases
verbs:
- list
- watch
- apiGroups:
- ""
resources:
- nodes
verbs:
- get
- apiGroups:
- ""
resources:
- services
verbs:
- get
- list
- watch
- apiGroups:
- networking.k8s.io
resources:
- ingresses
verbs:
- get
- list
- watch
- apiGroups:
- ""
resources:
- events
verbs:
- create
- patch
- apiGroups:
- networking.k8s.io
resources:
- ingresses/status
verbs:
- update
- apiGroups:
- networking.k8s.io
resources:
- ingressclasses
verbs:
- get
- list
- watch
- apiGroups:
- discovery.k8s.io
resources:
- endpointslices
verbs:
- list
- watch
- get
---
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRole
metadata:
labels:
app.kubernetes.io/component: admission-webhook
app.kubernetes.io/instance: ingress-nginx
app.kubernetes.io/name: ingress-nginx
app.kubernetes.io/part-of: ingress-nginx
app.kubernetes.io/version: 1.12.0
name: ingress-nginx-admission
rules:
- apiGroups:
- admissionregistration.k8s.io
resources:
- validatingwebhookconfigurations
verbs:
- get
- update
---
apiVersion: rbac.authorization.k8s.io/v1
kind: RoleBinding
metadata:
labels:
app.kubernetes.io/component: controller
app.kubernetes.io/instance: ingress-nginx
app.kubernetes.io/name: ingress-nginx
app.kubernetes.io/part-of: ingress-nginx
app.kubernetes.io/version: 1.12.0
name: ingress-nginx
namespace: ingress-nginx
roleRef:
apiGroup: rbac.authorization.k8s.io
kind: Role
name: ingress-nginx
subjects:
- kind: ServiceAccount
name: ingress-nginx
namespace: ingress-nginx
---
apiVersion: rbac.authorization.k8s.io/v1
kind: RoleBinding
metadata:
labels:
app.kubernetes.io/component: admission-webhook
app.kubernetes.io/instance: ingress-nginx
app.kubernetes.io/name: ingress-nginx
app.kubernetes.io/part-of: ingress-nginx
app.kubernetes.io/version: 1.12.0
name: ingress-nginx-admission
namespace: ingress-nginx
roleRef:
apiGroup: rbac.authorization.k8s.io
kind: Role
name: ingress-nginx-admission
subjects:
- kind: ServiceAccount
name: ingress-nginx-admission
namespace: ingress-nginx
---
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRoleBinding
metadata:
labels:
app.kubernetes.io/instance: ingress-nginx
app.kubernetes.io/name: ingress-nginx
app.kubernetes.io/part-of: ingress-nginx
app.kubernetes.io/version: 1.12.0
name: ingress-nginx
roleRef:
apiGroup: rbac.authorization.k8s.io
kind: ClusterRole
name: ingress-nginx
subjects:
- kind: ServiceAccount
name: ingress-nginx
namespace: ingress-nginx
---
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRoleBinding
metadata:
labels:
app.kubernetes.io/component: admission-webhook
app.kubernetes.io/instance: ingress-nginx
app.kubernetes.io/name: ingress-nginx
app.kubernetes.io/part-of: ingress-nginx
app.kubernetes.io/version: 1.12.0
name: ingress-nginx-admission
roleRef:
apiGroup: rbac.authorization.k8s.io
kind: ClusterRole
name: ingress-nginx-admission
subjects:
- kind: ServiceAccount
name: ingress-nginx-admission
namespace: ingress-nginx
---
apiVersion: v1
data: null
kind: ConfigMap
metadata:
labels:
app.kubernetes.io/component: controller
app.kubernetes.io/instance: ingress-nginx
app.kubernetes.io/name: ingress-nginx
app.kubernetes.io/part-of: ingress-nginx
app.kubernetes.io/version: 1.12.0
name: ingress-nginx-controller
namespace: ingress-nginx
---
apiVersion: v1
kind: Service
metadata:
labels:
app.kubernetes.io/component: controller
app.kubernetes.io/instance: ingress-nginx
app.kubernetes.io/name: ingress-nginx
app.kubernetes.io/part-of: ingress-nginx
app.kubernetes.io/version: 1.12.0
name: ingress-nginx-controller
namespace: ingress-nginx
spec:
externalTrafficPolicy: Local
ipFamilies:
- IPv4
ipFamilyPolicy: SingleStack
ports:
- appProtocol: http
name: http
port: 80
protocol: TCP
targetPort: http
- appProtocol: https
name: https
port: 443
protocol: TCP
targetPort: https
selector:
app.kubernetes.io/component: controller
app.kubernetes.io/instance: ingress-nginx
app.kubernetes.io/name: ingress-nginx
type: LoadBalancer
---
apiVersion: v1
kind: Service
metadata:
labels:
app.kubernetes.io/component: controller
app.kubernetes.io/instance: ingress-nginx
app.kubernetes.io/name: ingress-nginx
app.kubernetes.io/part-of: ingress-nginx
app.kubernetes.io/version: 1.12.0
name: ingress-nginx-controller-admission
namespace: ingress-nginx
spec:
ports:
- appProtocol: https
name: https-webhook
port: 443
targetPort: webhook
selector:
app.kubernetes.io/component: controller
app.kubernetes.io/instance: ingress-nginx
app.kubernetes.io/name: ingress-nginx
type: ClusterIP
---
apiVersion: apps/v1
kind: Deployment
metadata:
labels:
app.kubernetes.io/component: controller
app.kubernetes.io/instance: ingress-nginx
app.kubernetes.io/name: ingress-nginx
app.kubernetes.io/part-of: ingress-nginx
app.kubernetes.io/version: 1.12.0
name: ingress-nginx-controller
namespace: ingress-nginx
spec:
minReadySeconds: 0
revisionHistoryLimit: 10
selector:
matchLabels:
app.kubernetes.io/component: controller
app.kubernetes.io/instance: ingress-nginx
app.kubernetes.io/name: ingress-nginx
strategy:
rollingUpdate:
maxUnavailable: 1
type: RollingUpdate
template:
metadata:
labels:
app.kubernetes.io/component: controller
app.kubernetes.io/instance: ingress-nginx
app.kubernetes.io/name: ingress-nginx
app.kubernetes.io/part-of: ingress-nginx
app.kubernetes.io/version: 1.12.0
spec:
containers:
- args:
- /nginx-ingress-controller
- --publish-service=$(POD_NAMESPACE)/ingress-nginx-controller
- --election-id=ingress-nginx-leader
- --controller-class=k8s.io/ingress-nginx
- --ingress-class=nginx
- --configmap=$(POD_NAMESPACE)/ingress-nginx-controller
- --validating-webhook=:8443
- --validating-webhook-certificate=/usr/local/certificates/cert
- --validating-webhook-key=/usr/local/certificates/key
env:
- name: POD_NAME
valueFrom:
fieldRef:
fieldPath: metadata.name
- name: POD_NAMESPACE
valueFrom:
fieldRef:
fieldPath: metadata.namespace
- name: LD_PRELOAD
value: /usr/local/lib/libmimalloc.so
image: registry.k8s.io/ingress-nginx/controller:v1.12.0@sha256:e6b8de175acda6ca913891f0f727bca4527e797d52688cbe9fec9040d6f6b6fa
imagePullPolicy: IfNotPresent
lifecycle:
preStop:
exec:
command:
- /wait-shutdown
livenessProbe:
failureThreshold: 5
httpGet:
path: /healthz
port: 10254
scheme: HTTP
initialDelaySeconds: 10
periodSeconds: 10
successThreshold: 1
timeoutSeconds: 1
name: controller
ports:
- containerPort: 80
name: http
protocol: TCP
- containerPort: 443
name: https
protocol: TCP
- containerPort: 8443
name: webhook
protocol: TCP
readinessProbe:
failureThreshold: 3
httpGet:
path: /healthz
port: 10254
scheme: HTTP
initialDelaySeconds: 10
periodSeconds: 10
successThreshold: 1
timeoutSeconds: 1
resources:
requests:
cpu: 100m
memory: 90Mi
securityContext:
allowPrivilegeEscalation: false
capabilities:
add:
- NET_BIND_SERVICE
drop:
- ALL
readOnlyRootFilesystem: false
runAsGroup: 82
runAsNonRoot: true
runAsUser: 101
seccompProfile:
type: RuntimeDefault
volumeMounts:
- mountPath: /usr/local/certificates/
name: webhook-cert
readOnly: true
dnsPolicy: ClusterFirst
nodeSelector:
kubernetes.io/os: linux
serviceAccountName: ingress-nginx
terminationGracePeriodSeconds: 300
volumes:
- name: webhook-cert
secret:
secretName: ingress-nginx-admission
---
apiVersion: batch/v1
kind: Job
metadata:
labels:
app.kubernetes.io/component: admission-webhook
app.kubernetes.io/instance: ingress-nginx
app.kubernetes.io/name: ingress-nginx
app.kubernetes.io/part-of: ingress-nginx
app.kubernetes.io/version: 1.12.0
name: ingress-nginx-admission-create
namespace: ingress-nginx
spec:
template:
metadata:
labels:
app.kubernetes.io/component: admission-webhook
app.kubernetes.io/instance: ingress-nginx
app.kubernetes.io/name: ingress-nginx
app.kubernetes.io/part-of: ingress-nginx
app.kubernetes.io/version: 1.12.0
name: ingress-nginx-admission-create
spec:
containers:
- args:
- create
- --host=ingress-nginx-controller-admission,ingress-nginx-controller-admission.$(POD_NAMESPACE).svc
- --namespace=$(POD_NAMESPACE)
- --secret-name=ingress-nginx-admission
env:
- name: POD_NAMESPACE
valueFrom:
fieldRef:
fieldPath: metadata.namespace
image: registry.k8s.io/ingress-nginx/kube-webhook-certgen:v1.5.0@sha256:aaafd456bda110628b2d4ca6296f38731a3aaf0bf7581efae824a41c770a8fc4
imagePullPolicy: IfNotPresent
name: create
securityContext:
allowPrivilegeEscalation: false
capabilities:
drop:
- ALL
readOnlyRootFilesystem: true
runAsGroup: 65532
runAsNonRoot: true
runAsUser: 65532
seccompProfile:
type: RuntimeDefault
nodeSelector:
kubernetes.io/os: linux
restartPolicy: OnFailure
serviceAccountName: ingress-nginx-admission
---
apiVersion: batch/v1
kind: Job
metadata:
labels:
app.kubernetes.io/component: admission-webhook
app.kubernetes.io/instance: ingress-nginx
app.kubernetes.io/name: ingress-nginx
app.kubernetes.io/part-of: ingress-nginx
app.kubernetes.io/version: 1.12.0
name: ingress-nginx-admission-patch
namespace: ingress-nginx
spec:
template:
metadata:
labels:
app.kubernetes.io/component: admission-webhook
app.kubernetes.io/instance: ingress-nginx
app.kubernetes.io/name: ingress-nginx
app.kubernetes.io/part-of: ingress-nginx
app.kubernetes.io/version: 1.12.0
name: ingress-nginx-admission-patch
spec:
containers:
- args:
- patch
- --webhook-name=ingress-nginx-admission
- --namespace=$(POD_NAMESPACE)
- --patch-mutating=false
- --secret-name=ingress-nginx-admission
- --patch-failure-policy=Fail
env:
- name: POD_NAMESPACE
valueFrom:
fieldRef:
fieldPath: metadata.namespace
image: registry.k8s.io/ingress-nginx/kube-webhook-certgen:v1.5.0@sha256:aaafd456bda110628b2d4ca6296f38731a3aaf0bf7581efae824a41c770a8fc4
imagePullPolicy: IfNotPresent
name: patch
securityContext:
allowPrivilegeEscalation: false
capabilities:
drop:
- ALL
readOnlyRootFilesystem: true
runAsGroup: 65532
runAsNonRoot: true
runAsUser: 65532
seccompProfile:
type: RuntimeDefault
nodeSelector:
kubernetes.io/os: linux
restartPolicy: OnFailure
serviceAccountName: ingress-nginx-admission
---
apiVersion: networking.k8s.io/v1
kind: IngressClass
metadata:
labels:
app.kubernetes.io/component: controller
app.kubernetes.io/instance: ingress-nginx
app.kubernetes.io/name: ingress-nginx
app.kubernetes.io/part-of: ingress-nginx
app.kubernetes.io/version: 1.12.0
name: nginx
spec:
controller: k8s.io/ingress-nginx
---
apiVersion: admissionregistration.k8s.io/v1
kind: ValidatingWebhookConfiguration
metadata:
labels:
app.kubernetes.io/component: admission-webhook
app.kubernetes.io/instance: ingress-nginx
app.kubernetes.io/name: ingress-nginx
app.kubernetes.io/part-of: ingress-nginx
app.kubernetes.io/version: 1.12.0
name: ingress-nginx-admission
webhooks:
- admissionReviewVersions:
- v1
clientConfig:
service:
name: ingress-nginx-controller-admission
namespace: ingress-nginx
path: /networking/v1/ingresses
port: 443
failurePolicy: Fail
matchPolicy: Equivalent
name: validate.nginx.ingress.kubernetes.io
rules:
- apiGroups:
- networking.k8s.io
apiVersions:
- v1
operations:
- CREATE
- UPDATE
resources:
- ingresses
sideEffects: None
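A sketch of applying the manifest above, assuming it is saved as the ingress-nginx-1.12.yaml referenced later in this section:
kubectl apply -f ingress-nginx-1.12.yaml
kubectl -n ingress-nginx get pods   # controller should reach Running, the two admission jobs Completed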
# Ingresses using annotations such as server-snippet or configuration-snippet cannot be created
# unless webhook validation is turned off; it fails with errors like:
Resource: "networking.k8s.io/v1, Resource=ingresses", GroupVersionKind: "networking.k8s.io/v1, Kind=Ingress"
Name: "test-ingress", Namespace: "test"
for: "ingress.yaml": error when patching "ingress.yaml": admission webhook "validate.nginx.ingress.kubernetes.io" denied the request: annotation group ServerSnippet contains risky annotation based on ingress configuration
# Delete the webhook to bypass validation
kubectl delete validatingwebhookconfiguration ingress-nginx-admission
# With the default configuration, the ingress ports cannot be reached from outside the cluster
# In the ingress-nginx-controller Service, change externalTrafficPolicy from Local to Cluster
externalTrafficPolicy: Cluster
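Rather than editing the manifest, the Service can be patched in place; a minimal sketch:
kubectl -n ingress-nginx patch svc ingress-nginx-controller -p '{"spec":{"externalTrafficPolicy":"Cluster"}}'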
prometheus
# Edit manifests/alertmanager-service.yaml, grafana-service.yaml, and prometheus-service.yaml
apiVersion: v1
kind: Service
metadata:
labels:
app.kubernetes.io/component: alert-router
app.kubernetes.io/instance: main
app.kubernetes.io/name: alertmanager
app.kubernetes.io/part-of: kube-prometheus
app.kubernetes.io/version: 0.27.0
name: alertmanager-main
namespace: monitoring
spec:
  type: NodePort # add this to all three Services
ports:
- name: web
port: 9093
targetPort: web
- name: reloader-web
port: 8080
targetPort: reloader-web
selector:
app.kubernetes.io/component: alert-router
app.kubernetes.io/instance: main
app.kubernetes.io/name: alertmanager
app.kubernetes.io/part-of: kube-prometheus
sessionAffinity: ClientIP
# Create the custom resource definitions
cd /root/kube-prometheus-release-0.13/manifests/setup
kubectl create -f .
# Create prometheus and grafana
cd /root/kube-prometheus-release-0.13/manifests
kubectl create -f .
# After the steps above, grafana and the other services cannot be reached through their Services (NodePort included); deleting the network policies fixes it
kubectl delete networkpolicy --all -n monitoring
# Restart ingress
kubectl -n ingress-nginx rollout restart deployment ingress-nginx-controller
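A quick sanity check once the network policies are gone and ingress has restarted:
kubectl -n monitoring get pods   # everything should be Running
kubectl -n monitoring get svc    # note the NodePorts on alertmanager-main / grafana / prometheus-k8s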
# ingress-nginx 1.12 flags annotations such as
# nginx.ingress.kubernetes.io/configuration-snippet
# nginx.ingress.kubernetes.io/server-snippet as risky;
# disable the webhook:
vim ingress-nginx-1.12.yaml
# (excerpt) the controller container must sit under spec.template.spec
apiVersion: apps/v1
kind: Deployment
metadata:
  name: ingress-nginx-controller
  namespace: ingress-nginx
spec:
  template:
    spec:
      containers:
      - name: nginx-ingress-controller
        image: nginx-ingress-controller:latest
        args:
        - /nginx-ingress-controller
        - --enable-admission-webhook=false
nfs
yum install -y nfs-utils rpcbind # run on every node
mkdir -p /nfs/data
cat /etc/exports
/nfs/data *(rw,no_root_squash)
systemctl enable nfs
systemctl start nfs
systemctl enable rpcbind
systemctl start rpcbind
chmod -R 777 /nfs/data
exportfs -arv
showmount -e localhost
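A sketch of verifying the export from a worker node before wiring it into Kubernetes (the /mnt mountpoint is arbitrary):
mount -t nfs 192.168.1.223:/nfs/data /mnt
touch /mnt/nfs-write-test && rm /mnt/nfs-write-test
umount /mnt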
# Kubernetes NFS storage needs the nfs-subdir-external-provisioner, an automatic provisioner that uses an existing NFS server to back dynamic provisioning of Kubernetes persistent volumes through PersistentVolumeClaims. It extends the old Kubernetes NFS-Client Provisioner: nfs-client-provisioner is no longer updated and its GitHub repository has been archived, with development moved to the nfs-subdir-external-provisioner repository.
wget https:
unzip nfs-subdir-external-provisioner-4.0.18.zip
cd nfs-subdir-external-provisioner-nfs-subdir-external-provisioner-4.0.18/
# Create the nfs namespace
kubectl create ns nfs-system
# Point the manifests at that namespace
sed -i'' "s/namespace:.*/namespace: nfs-system/g" ./deploy/rbac.yaml ./deploy/deployment.yaml
# Create the RBAC objects
kubectl create -f deploy/rbac.yaml
# Adjust the default configuration
cat deploy/deployment.yaml
apiVersion: apps/v1
kind: Deployment
metadata:
name: nfs-client-provisioner
labels:
app: nfs-client-provisioner
# replace with namespace where provisioner is deployed
namespace: nfs-system
spec:
replicas: 1
strategy:
type: Recreate
selector:
matchLabels:
app: nfs-client-provisioner
template:
metadata:
labels:
app: nfs-client-provisioner
spec:
serviceAccountName: nfs-client-provisioner
containers:
- name: nfs-client-provisioner
          image: registry.k8s.io/sig-storage/nfs-subdir-external-provisioner:v4.0.2 # image bundled in the archive
volumeMounts:
- name: nfs-client-root
mountPath: /persistentvolumes
env:
- name: PROVISIONER_NAME
value: k8s-sigs.io/nfs-subdir-external-provisioner
- name: NFS_SERVER
              value: 192.168.1.223 # change to your NFS server IP
- name: NFS_PATH
              value: /nfs/data # change to your export path
volumes:
- name: nfs-client-root
nfs:
            server: 192.168.1.223 # change to your NFS server IP
            path: /nfs/data # change to your export path
# Deploy the NFS Subdir External Provisioner
kubectl apply -f deploy/deployment.yaml
# Deploy the StorageClass
cat deploy/class.yaml
apiVersion: storage.k8s.io/v1
kind: StorageClass
metadata:
  name: nfs-client # must match the storageClassName referenced by the PVCs below
provisioner: k8s-sigs.io/nfs-subdir-external-provisioner
parameters:
archiveOnDelete: "true"
kubectl apply -f deploy/class.yaml
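A throwaway claim to confirm dynamic provisioning works (the name test-claim is ours; any name will do):
kubectl apply -f - << EOF
apiVersion: v1
kind: PersistentVolumeClaim
metadata:
  name: test-claim
spec:
  storageClassName: nfs-client
  accessModes:
    - ReadWriteMany
  resources:
    requests:
      storage: 1Mi
EOF
kubectl get pvc test-claim   # should turn Bound within a few seconds
kubectl delete pvc test-claim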
helm
tar -zxf helm-v3.17.1-linux-amd64.tar.gz
mv linux-amd64/helm /usr/local/bin/helm
# List repositories
helm repo list
# Remove a repository
helm repo remove stable
# Add repositories
helm repo add stable https:
helm repo add kaiyuanshe http:
helm repo add azure http:
helm repo add dandydev https:
helm repo add bitnami https:
# Search for a chart
helm search repo redis
# Pull a chart
helm pull bitnami/redis-cluster --version 8.1.2
# Install
helm install redis-cluster bitnami/redis-cluster --set global.storageClass=nfs-client,global.redis.password=xiagao --version 8.1.2
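A quick check that the release came up, assuming bitnami's standard labels:
helm list   # status should be deployed
kubectl get pods -l app.kubernetes.io/name=redis-cluster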
# Uninstall
helm uninstall redis-cluster
kuboard
cat kuboard-v3.yaml
---
apiVersion: v1
kind: Namespace
metadata:
name: kuboard
---
apiVersion: v1
kind: ConfigMap
metadata:
name: kuboard-v3-config
namespace: kuboard
data:
  # For an explanation of the parameters below, see the docs at https:
# [common]
KUBOARD_ENDPOINT: 'http://192.168.1.223:30080'
KUBOARD_AGENT_SERVER_UDP_PORT: '30081'
KUBOARD_AGENT_SERVER_TCP_PORT: '30081'
KUBOARD_SERVER_LOGRUS_LEVEL: info # error / debug / trace
  # KUBOARD_AGENT_KEY is the secret used between the Agent and Kuboard. Change it to any 32-character string of letters and digits; after changing it, delete the Kuboard Agent and re-import it.
KUBOARD_AGENT_KEY: 32b7d6572c6255211b4eec9009e4a816
  # For an explanation of the parameters below, see the docs at https:
# [gitlab login]
# KUBOARD_LOGIN_TYPE: "gitlab"
# KUBOARD_ROOT_USER: "your-user-name-in-gitlab"
# GITLAB_BASE_URL: "http://gitlab.mycompany.com"
# GITLAB_APPLICATION_ID: "7c10882aa46810a0402d17c66103894ac5e43d6130b81c17f7f2d8ae182040b5"
# GITLAB_CLIENT_SECRET: "77c149bd3a4b6870bffa1a1afaf37cba28a1817f4cf518699065f5a8fe958889"
  # For an explanation of the parameters below, see the docs at https:
# [github login]
# KUBOARD_LOGIN_TYPE: "github"
# KUBOARD_ROOT_USER: "your-user-name-in-github"
# GITHUB_CLIENT_ID: "17577d45e4de7dad88e0"
# GITHUB_CLIENT_SECRET: "ff738553a8c7e9ad39569c8d02c1d85ec19115a7"
  # For an explanation of the parameters below, see the docs at https:
# [ldap login]
# KUBOARD_LOGIN_TYPE: "ldap"
# KUBOARD_ROOT_USER: "your-user-name-in-ldap"
# LDAP_HOST: "ldap-ip-address:389"
# LDAP_BIND_DN: "cn=admin,dc=example,dc=org"
# LDAP_BIND_PASSWORD: "admin"
# LDAP_BASE_DN: "dc=example,dc=org"
# LDAP_FILTER: "(objectClass=posixAccount)"
# LDAP_ID_ATTRIBUTE: "uid"
# LDAP_USER_NAME_ATTRIBUTE: "uid"
# LDAP_EMAIL_ATTRIBUTE: "mail"
# LDAP_DISPLAY_NAME_ATTRIBUTE: "cn"
# LDAP_GROUP_SEARCH_BASE_DN: "dc=example,dc=org"
# LDAP_GROUP_SEARCH_FILTER: "(objectClass=posixGroup)"
# LDAP_USER_MACHER_USER_ATTRIBUTE: "gidNumber"
# LDAP_USER_MACHER_GROUP_ATTRIBUTE: "gidNumber"
# LDAP_GROUP_NAME_ATTRIBUTE: "cn"
---
apiVersion: apps/v1
kind: StatefulSet
metadata:
name: kuboard-etcd
namespace: kuboard
labels:
app: kuboard-etcd
spec:
serviceName: kuboard-etcd
replicas: 3
selector:
matchLabels:
app: kuboard-etcd
template:
metadata:
name: kuboard-etcd
labels:
app: kuboard-etcd
spec:
containers:
- name: kuboard-etcd
image: swr.cn-east-2.myhuaweicloud.com/kuboard/etcd:v3.4.14
ports:
- containerPort: 2379
name: client
- containerPort: 2380
name: peer
env:
- name: KUBOARD_ETCD_ENDPOINTS
value: >-
kuboard-etcd-0.kuboard-etcd:2379,kuboard-etcd-1.kuboard-etcd:2379,kuboard-etcd-2.kuboard-etcd:2379
volumeMounts:
- name: data
mountPath: /data
command:
- /bin/sh
- -c
- |
PEERS="kuboard-etcd-0=http://kuboard-etcd-0.kuboard-etcd:2380,kuboard-etcd-1=http://kuboard-etcd-1.kuboard-etcd:2380,kuboard-etcd-2=http://kuboard-etcd-2.kuboard-etcd:2380"
exec etcd --name ${HOSTNAME} \
--listen-peer-urls http:
--listen-client-urls http:
--advertise-client-urls http:
--initial-advertise-peer-urls http:
--initial-cluster-token kuboard-etcd-cluster-1 \
--initial-cluster ${PEERS} \
--initial-cluster-state new \
--data-dir /data/kuboard.etcd
volumeClaimTemplates:
- metadata:
name: data
spec:
      # Fill in a valid StorageClass name
storageClassName: nfs-client
accessModes: [ "ReadWriteMany" ]
resources:
requests:
storage: 5Gi
---
apiVersion: v1
kind: PersistentVolumeClaim
metadata:
  name: kuboard-data-pvc
  namespace: kuboard
spec:
storageClassName: nfs-client
accessModes:
- ReadWriteOnce
resources:
requests:
storage: 10Gi
---
apiVersion: v1
kind: Service
metadata:
name: kuboard-etcd
namespace: kuboard
spec:
type: ClusterIP
ports:
- port: 2379
name: client
- port: 2380
name: peer
selector:
app: kuboard-etcd
---
apiVersion: apps/v1
kind: Deployment
metadata:
annotations:
deployment.kubernetes.io/revision: '9'
k8s.kuboard.cn/ingress: 'false'
k8s.kuboard.cn/service: NodePort
k8s.kuboard.cn/workload: kuboard-v3
labels:
k8s.kuboard.cn/name: kuboard-v3
name: kuboard-v3
namespace: kuboard
spec:
replicas: 1
selector:
matchLabels:
k8s.kuboard.cn/name: kuboard-v3
template:
metadata:
labels:
k8s.kuboard.cn/name: kuboard-v3
spec:
containers:
- env:
- name: KUBOARD_ETCD_ENDPOINTS
value: >-
kuboard-etcd-0.kuboard-etcd:2379,kuboard-etcd-1.kuboard-etcd:2379,kuboard-etcd-2.kuboard-etcd:2379
envFrom:
- configMapRef:
name: kuboard-v3-config
image: 'swr.cn-east-2.myhuaweicloud.com/kuboard/kuboard:v3'
imagePullPolicy: Always
name: kuboard
volumeMounts:
- mountPath: "/data"
name: kuboard-data
volumes:
- name: kuboard-data
persistentVolumeClaim:
claimName: kuboard-data-pvc
---
apiVersion: v1
kind: Service
metadata:
annotations:
k8s.kuboard.cn/workload: kuboard-v3
labels:
k8s.kuboard.cn/name: kuboard-v3
name: kuboard-v3
namespace: kuboard
spec:
ports:
- name: webui
nodePort: 30080
port: 80
protocol: TCP
targetPort: 80
- name: agentservertcp
nodePort: 30081
port: 10081
protocol: TCP
targetPort: 10081
- name: agentserverudp
nodePort: 30081
port: 10081
protocol: UDP
targetPort: 10081
selector:
k8s.kuboard.cn/name: kuboard-v3
sessionAffinity: None
type: NodePort
# If the PVC was not created automatically, create it by hand
apiVersion: v1
kind: PersistentVolumeClaim
metadata:
name: kuboard-data-pvc
namespace: kuboard
spec:
storageClassName: nfs-client
accessModes:
- ReadWriteOnce
resources:
requests:
storage: 10Gi
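A sketch of deploying and reaching Kuboard, assuming the kuboard-v3.yaml shown above and the NodePort configured in its Service (the default account below is from Kuboard's docs):
kubectl apply -f kuboard-v3.yaml
kubectl -n kuboard get pods
# Browse to http://192.168.1.223:30080 and log in with admin / Kuboard123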
metrics-server
# Kubernetes 1.31 uses metrics-server 0.7
# Source: https:
wget https:
# Edit the manifest
containers:
- args:
- --cert-dir=/tmp
- --secure-port=10250
- --kubelet-preferred-address-types=InternalIP,ExternalIP,Hostname
- --kubelet-use-node-status-port
- --metric-resolution=15s
        - --kubelet-insecure-tls # added
        image: registry.cn-hangzhou.aliyuncs.com/google_containers/metrics-server:v0.7.1 # changed
kubectl apply -f high-availability-1.21+.yaml
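Once applied, the metrics pipeline can be checked end to end:
kubectl -n kube-system get pods -l k8s-app=metrics-server
kubectl top nodes   # per-node CPU/memory once the first scrape lands (15s resolution per the args above)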
FAQ
# Warning from crictl images
WARN[0000] image connect using default endpoints: [unix:
crictl config runtime-endpoint unix:
# Import an image into containerd
ctr -n k8s.io image import xxx.tar
# Shell completion
yum -y install bash-completion
source /usr/share/bash-completion/bash_completion
source <(kubectl completion bash)
echo "source <(kubectl completion bash)" >> ~/.bashrc