3-sealer-自定义CNI网络插件-flannel
一、 资源准备
- 资料地址
- 参考文献:https://github.com/sealerio/sealer
- helm安装包:https://get.helm.sh/helm-v3.9.0-linux-amd64.tar.gz
- CNI网络插件:https://github.com/containernetworking/plugins/releases/download/v0.8.3/cni-plugins-linux-amd64-v0.8.3.tgz
- 文件目录如下:
[root@portal139 flannel]# ll
-rw-r--r-- 1 root root 925 Nov 1 14:11 Clusterfile
drwxrwxrwx 2 root root 239 Oct 28 17:05 cni
-rw-r--r-- 1 root root 36689601 Dec 8 2021 cni-plugins-linux-amd64-v0.8.3.tgz
-rw-r--r-- 1 root root 13952532 Oct 31 18:09 helm-v3.9.0-linux-amd64.tar.gz
-rw-r--r-- 1 root root 4005 Oct 31 15:04 init-kube.sh
-rw-r--r-- 1 root root 428 Nov 2 15:11 Kubefile
-rw-r--r-- 1 root root 4813 Nov 1 14:12 kube-flannel.yml
[root@portal139 flannel]# tree
.
├── Clusterfile
├── cni
│ ├── bandwidth
│ ├── bridge
│ ├── dhcp
│ ├── firewall
│ ├── flannel
│ ├── host-device
│ ├── host-local
│ ├── ipvlan
│ ├── loopback
│ ├── macvlan
│ ├── portmap
│ ├── ptp
│ ├── sbr
│ ├── static
│ ├── tuning
│ └── vlan
├── cni-plugins-linux-amd64-v0.8.3.tgz
├── helm-v3.9.0-linux-amd64.tar.gz
├── init-kube.sh
├── Kubefile
└── kube-flannel.yml
[root@portal139 flannel]#
- CNI资料下载准备
- wget https://github.com/containernetworking/plugins/releases/download/v0.8.3/cni-plugins-linux-amd64-v0.8.3.tgz
- 解压 得到cni文件夹
- 给该文件夹赋予 777 权限
- init-kube.sh初始化脚本准备
#!/bin/bash
# Node bootstrap script executed by sealer on every host before kubeadm runs.
# Load the kernel modules kube-proxy needs for IPVS mode, plus br_netfilter
# so bridged pod traffic is visible to iptables.
modprobe -- ip_vs
modprobe -- ip_vs_rr
modprobe -- ip_vs_wrr
modprobe -- ip_vs_sh
modprobe -- br_netfilter
# version_ge A B — succeed (exit 0) when version A >= version B.
#   version_ge 4.19 4.19 -> true
#   version_ge 5.4  4.19 -> true
#   version_ge 3.10 4.19 -> false
version_ge() {
  local newest
  # Version-aware sort; A is >= B exactly when A sorts first descending.
  newest=$(printf '%s\n' "$@" | sort -rV | head -n 1)
  [ "$newest" = "$1" ]
}
disable_selinux() {
  # Nothing to do unless an SELinux config exists and is set to enforcing.
  [ -s /etc/selinux/config ] || return 0
  grep 'SELINUX=enforcing' /etc/selinux/config || return 0
  # Persist the change for future boots, then drop to permissive right now.
  sed -i 's/SELINUX=enforcing/SELINUX=disabled/g' /etc/selinux/config
  setenforce 0
}
get_distribution() {
  # Print the distro ID (e.g. "centos", "ubuntu"); empty when unknown.
  # Every officially supported system ships /etc/os-release.
  if [ -r /etc/os-release ]; then
    # Source in a subshell so only $ID leaks into this assignment.
    lsb_dist="$(. /etc/os-release && echo "$ID")"
  else
    lsb_dist=""
  fi
  # An empty result is harmless: callers' case statements fall through.
  echo "$lsb_dist"
}
disable_firewalld() {
  # Best-effort firewall shutdown; the tool differs per distro family.
  lsb_dist="$(get_distribution)"
  lsb_dist="$(printf '%s\n' "$lsb_dist" | tr '[:upper:]' '[:lower:]')"
  case "$lsb_dist" in
    ubuntu | deepin | debian | raspbian)
      # Debian-family hosts use ufw, when it is installed at all.
      command -v ufw &> /dev/null && ufw disable
      ;;
    centos | rhel | ol | sles | kylin | neokylin)
      systemctl stop firewalld && systemctl disable firewalld
      ;;
    *)
      # Unknown distro: assume firewalld, the most common default.
      systemctl stop firewalld && systemctl disable firewalld
      echo "unknown system, use default to stop firewalld"
      ;;
  esac
}
# Kernel 4.19 renamed nf_conntrack_ipv4 to nf_conntrack; pick the right
# module after stripping the "-<release>" suffix from uname -r.
kernel_version=$(uname -r | cut -d- -f1)
if version_ge "${kernel_version}" 4.19; then
modprobe -- nf_conntrack
else
modprobe -- nf_conntrack_ipv4
fi
# Kubernetes networking prerequisites: make bridged traffic traverse
# iptables, and relax reverse-path filtering for the CNI overlay.
cat <<EOF > /etc/sysctl.d/k8s.conf
net.bridge.bridge-nf-call-ip6tables = 1
net.bridge.bridge-nf-call-iptables = 1
net.ipv4.conf.all.rp_filter=0
EOF
sysctl --system
# Turn on IP forwarding immediately for pod-to-pod routing.
sysctl -w net.ipv4.ip_forward=1
disable_firewalld
# kubelet refuses to start with swap enabled; tolerate "no swap configured".
swapoff -a || true
disable_selinux
# Install the kubernetes binaries shipped in the image rootfs.
# Paths are relative: sealer runs this script from <rootfs>/scripts.
chmod -R 755 ../bin/*
# NOTE(review): 644 on a directory clears its execute (traverse) bit; this
# only works because the script runs as root — confirm intended.
chmod 644 ../bin
cp ../bin/* /usr/bin
cp ../scripts/kubelet-pre-start.sh /usr/bin
#cni
# Install the bundled CNI plugin binaries where kubelet looks for them.
mkdir /opt/cni/bin -p
chmod -R 755 ../cni/*
chmod 644 ../cni
cp ../cni/* /opt/cni/bin
# Cgroup driver
# Install the kubelet systemd unit plus the kubeadm drop-in shipped in the
# image rootfs (paths relative to <rootfs>/scripts).
mkdir -p /etc/systemd/system
cp ../etc/kubelet.service /etc/systemd/system/
[ -d /etc/systemd/system/kubelet.service.d ] || mkdir /etc/systemd/system/kubelet.service.d
cp ../etc/10-kubeadm.conf /etc/systemd/system/kubelet.service.d/
[ -d /var/lib/kubelet ] || mkdir -p /var/lib/kubelet/
# Write the KubeletConfiguration. The YAML nesting below was lost in the
# original paste and is restored here — the file is invalid without it.
# The heredoc delimiter is unquoted so ${criDriver} expands; sealer is
# expected to export it (cgroupfs or systemd) before running this script —
# TODO confirm; it expands to an empty string if unset.
cat <<EOF > /var/lib/kubelet/config.yaml
address: 0.0.0.0
apiVersion: kubelet.config.k8s.io/v1beta1
authentication:
  anonymous:
    enabled: false
  webhook:
    cacheTTL: 2m0s
    enabled: true
  x509:
    clientCAFile: /etc/kubernetes/pki/ca.crt
authorization:
  mode: Webhook
  webhook:
    cacheAuthorizedTTL: 5m0s
    cacheUnauthorizedTTL: 30s
cgroupDriver: ${criDriver}
cgroupsPerQOS: true
clusterDNS:
- 10.96.0.10
clusterDomain: cluster.local
configMapAndSecretChangeDetectionStrategy: Watch
containerLogMaxFiles: 5
containerLogMaxSize: 10Mi
contentType: application/vnd.kubernetes.protobuf
cpuCFSQuota: true
cpuCFSQuotaPeriod: 100ms
cpuManagerPolicy: none
cpuManagerReconcilePeriod: 10s
enableControllerAttachDetach: true
enableDebuggingHandlers: true
enforceNodeAllocatable:
- pods
eventBurst: 10
eventRecordQPS: 5
evictionHard:
  imagefs.available: 15%
  memory.available: 100Mi
  nodefs.available: 10%
  nodefs.inodesFree: 5%
evictionPressureTransitionPeriod: 5m0s
failSwapOn: true
fileCheckFrequency: 20s
hairpinMode: promiscuous-bridge
healthzBindAddress: 127.0.0.1
healthzPort: 10248
httpCheckFrequency: 20s
imageGCHighThresholdPercent: 85
imageGCLowThresholdPercent: 80
imageMinimumGCAge: 2m0s
iptablesDropBit: 15
iptablesMasqueradeBit: 14
kind: KubeletConfiguration
kubeAPIBurst: 10
kubeAPIQPS: 5
makeIPTablesUtilChains: true
maxOpenFiles: 1000000
maxPods: 110
nodeLeaseDurationSeconds: 40
nodeStatusUpdateFrequency: 10s
oomScoreAdj: -999
podPidsLimit: -1
port: 10250
registryBurst: 10
registryPullQPS: 5
resolvConf: /etc/resolv.conf
rotateCertificates: true
runtimeRequestTimeout: 2m0s
serializeImagePulls: true
staticPodPath: /etc/kubernetes/manifests
streamingConnectionIdleTimeout: 4h0m0s
syncFrequency: 1m0s
volumeStatsAggPeriod: 1m0s
EOF
# Enable kubelet so it starts on boot; kubeadm starts it during init/join.
systemctl enable kubelet
- cni kube-flannel.yml资源编排准备
- net-conf.json 文件中 Network 对应的值,一定要与 Clusterfile 中 network 下 podCIDR 的值保持一致
---
# PodSecurityPolicy for the flannel DaemonSet (restored indentation — the
# pasted manifest had lost all YAML nesting and was invalid).
apiVersion: policy/v1beta1
kind: PodSecurityPolicy
metadata:
  name: psp.flannel.unprivileged
  annotations:
    seccomp.security.alpha.kubernetes.io/allowedProfileNames: docker/default
    seccomp.security.alpha.kubernetes.io/defaultProfileName: docker/default
    apparmor.security.beta.kubernetes.io/allowedProfileNames: runtime/default
    apparmor.security.beta.kubernetes.io/defaultProfileName: runtime/default
spec:
  privileged: false
  volumes:
    - configMap
    - secret
    - emptyDir
    - hostPath
  allowedHostPaths:
    - pathPrefix: "/etc/cni/net.d"
    - pathPrefix: "/etc/kube-flannel"
    - pathPrefix: "/run/flannel"
  readOnlyRootFilesystem: false
  # Users and groups
  runAsUser:
    rule: RunAsAny
  supplementalGroups:
    rule: RunAsAny
  fsGroup:
    rule: RunAsAny
  # Privilege Escalation
  allowPrivilegeEscalation: false
  defaultAllowPrivilegeEscalation: false
  # Capabilities
  allowedCapabilities: ['NET_ADMIN', 'NET_RAW']
  defaultAddCapabilities: []
  requiredDropCapabilities: []
  # Host namespaces
  hostPID: false
  hostIPC: false
  hostNetwork: true
  hostPorts:
    - min: 0
      max: 65535
  # SELinux
  seLinux:
    # SELinux is unused in CaaSP
    rule: 'RunAsAny'
---
# RBAC rules flannel needs: use its PSP, read pods, watch nodes, patch
# node status (restored indentation — pasted manifest had lost nesting).
kind: ClusterRole
apiVersion: rbac.authorization.k8s.io/v1
metadata:
  name: flannel
rules:
  - apiGroups: ['extensions']
    resources: ['podsecuritypolicies']
    verbs: ['use']
    resourceNames: ['psp.flannel.unprivileged']
  - apiGroups:
      - ""
    resources:
      - pods
    verbs:
      - get
  - apiGroups:
      - ""
    resources:
      - nodes
    verbs:
      - list
      - watch
  - apiGroups:
      - ""
    resources:
      - nodes/status
    verbs:
      - patch
kind: ClusterRoleBinding
apiVersion: rbac.authorization.k8s.io/v1
metadata:
name: flannel
roleRef:
apiGroup: rbac.authorization.k8s.io
kind: ClusterRole
name: flannel
subjects:
- kind: ServiceAccount
name: flannel
namespace: kube-system
---
# ServiceAccount the DaemonSet pods run under
# (restored indentation — pasted manifest had lost nesting).
apiVersion: v1
kind: ServiceAccount
metadata:
  name: flannel
  namespace: kube-system
---
# Flannel configuration (restored indentation — pasted manifest had lost
# nesting, which also mangled the embedded JSON block scalars).
kind: ConfigMap
apiVersion: v1
metadata:
  name: kube-flannel-cfg
  namespace: kube-system
  labels:
    tier: node
    app: flannel
data:
  cni-conf.json: |
    {
      "name": "cbr0",
      "cniVersion": "0.3.1",
      "plugins": [
        {
          "type": "flannel",
          "delegate": {
            "hairpinMode": true,
            "isDefaultGateway": true
          }
        },
        {
          "type": "portmap",
          "capabilities": {
            "portMappings": true
          }
        }
      ]
    }
  # "Network" must match podCIDR in the sealer Clusterfile.
  net-conf.json: |
    {
      "Network": "100.64.0.0/16",
      "Backend": {
        "Type": "vxlan"
      }
    }
---
# Flannel DaemonSet: one flanneld pod per Linux node
# (restored indentation — pasted manifest had lost nesting).
apiVersion: apps/v1
kind: DaemonSet
metadata:
  name: kube-flannel-ds
  namespace: kube-system
  labels:
    tier: node
    app: flannel
spec:
  selector:
    matchLabels:
      app: flannel
  template:
    metadata:
      labels:
        tier: node
        app: flannel
    spec:
      affinity:
        nodeAffinity:
          requiredDuringSchedulingIgnoredDuringExecution:
            nodeSelectorTerms:
              - matchExpressions:
                  - key: kubernetes.io/os
                    operator: In
                    values:
                      - linux
      hostNetwork: true
      priorityClassName: system-node-critical
      tolerations:
        - operator: Exists
          effect: NoSchedule
      serviceAccountName: flannel
      initContainers:
        # Copies the CNI conflist onto the host before flanneld starts.
        - name: install-cni
          image: quay.io/coreos/flannel:v0.14.0
          command:
            - cp
          args:
            - -f
            - /etc/kube-flannel/cni-conf.json
            - /etc/cni/net.d/10-flannel.conflist
          volumeMounts:
            - name: cni
              mountPath: /etc/cni/net.d
            - name: flannel-cfg
              mountPath: /etc/kube-flannel/
      containers:
        - name: kube-flannel
          image: quay.io/coreos/flannel:v0.14.0
          command:
            - /opt/bin/flanneld
          args:
            - --ip-masq
            - --kube-subnet-mgr
          resources:
            requests:
              cpu: "100m"
              memory: "50Mi"
            limits:
              cpu: "100m"
              memory: "50Mi"
          securityContext:
            privileged: false
            capabilities:
              add: ["NET_ADMIN", "NET_RAW"]
          env:
            - name: POD_NAME
              valueFrom:
                fieldRef:
                  fieldPath: metadata.name
            - name: POD_NAMESPACE
              valueFrom:
                fieldRef:
                  fieldPath: metadata.namespace
          volumeMounts:
            - name: run
              mountPath: /run/flannel
            - name: flannel-cfg
              mountPath: /etc/kube-flannel/
      volumes:
        - name: run
          hostPath:
            path: /run/flannel
        - name: cni
          hostPath:
            path: /etc/cni/net.d
        - name: flannel-cfg
          configMap:
            name: kube-flannel-cfg
- Helm安装准备(非必须)
二、自定义集群镜像
- 集群镜像下载、查询
[root@portal139 flannel]# sealer pull kubernetes:v1.19.9-alpine
[root@portal139 flannel]# sealer images
+------------------------------------------+---------------------------+-------+---------+---------------------+----------+
| IMAGE NAME | IMAGE ID | ARCH | VARIANT | CREATE | SIZE |
| kubernetes:v1.19.9-alpine | 9469d6fe5cdb884b8.... | amd64 | | 2022-10-28
+------------------------------------------+---------------------------+-------+---------+---------------------+----------+
[root@portal139 flannel]#
- 自定义Kubefile制作
- 基于kubernetes:v1.19.9-alpine集群镜像进行构建
- 复制cni到集群镜像
- 复制初始化脚本
- 复制cni脚本文件
- 解压Helm,安装Helm,登录镜像仓库,执行cni插件安装
FROM kubernetes:v1.19.9-alpine
# Bundle the CNI plugin binaries, the helm tarball, the node bootstrap
# script and the flannel manifest into the cluster image.
COPY cni .
COPY helm-v3.9.0-linux-amd64.tar.gz .
COPY init-kube.sh /scripts/
COPY kube-flannel.yml manifests/
# Runs once after the cluster is up: install helm, log in to the private
# registry, then apply the flannel manifest.
# SECURITY(review): registry credentials are hard-coded in plaintext here;
# prefer injecting them via sealer args/env or a credential file.
CMD tar zxvf helm-v3.9.0-linux-amd64.tar.gz \
&& cp linux-amd64/helm /usr/bin \
&& chmod +x /usr/bin/helm \
&& docker login dev-bj.hatech.com.cn -u hatech-jg -p Hatech1221! \
&& kubectl apply -f manifests/kube-flannel.yml
- 构建自定义镜像
- 注意上下文 .
- 注意上下文 .
- 注意上下文 .
[root@portal139 flannel]# sealer build -f Kubefile -t dev-bj.hatech.com.cn/library/kubernetes-flannel:v1.19.9-alpine .
2022-10-31 15:50:40 [INFO] [executor.go:123] start to check the middleware file
2022-10-31 15:50:40 [INFO] [executor.go:63] run build layer: COPY cni .
2022-10-31 15:50:41 [INFO] [executor.go:63] run build layer: COPY init-kube.sh /scripts/
2022-10-31 15:50:41 [INFO] [executor.go:63] run build layer: COPY kube-flannel.yml manifests/
2022-10-31 15:50:41 [INFO] [executor.go:95] exec all build instructs success
Pulling image: quay.io/coreos/flannel:v0.14.0
8522d622299c: Download complete
801bfaa63ef2: Download complete
e4264a7179f6: Download complete
bc75ea45ad2e: Download complete
78648579d12a: Download complete
3393447261e4: Download complete
071b96dd834b: Download complete
4de2f0468a91: Download complete
Status: images save success
2022-10-31 15:51:04 [INFO] [build.go:100] build image amd64 dev-bj.hatech.com.cn/library/kubernetes-flannel:v1.19.9-alpine success
[root@portal139 flannel]#
[root@portal139 flannel]# sealer images
+----------------------------------------------------------------+-------------+-------+---------+---------------------+----------+
| IMAGE NAME | IMAGE ID | ARCH | VARIANT | CREATE | SIZE |
+----------------------------------------------------------------+-------------+-------+---------+---------------------+----------+
| dev-bj.hatech.com.cn/library/kubernetes-flannel:v1.19.9-alpine | 114354c4... | amd64 | | 2022-10-31 15:51:04 | 702.30MB |
| kubernetes:v1.19.9-alpine | 9469d6fe... | amd64 | | 2022-10-28 17:22:57 | 616.37MB |
+----------------------------------------------------------------+-------------+-------+---------+---------------------+----------+
- 构建自定义Clusterfile
- podCIDR 一定要与 kube-flannel.yml 中 net-conf.json 的 Network 网段保持一致
- network中,cniName 配置为 flannel
# sealer Clusterfile (restored indentation — the pasted YAML had lost all
# nesting and was invalid; comments translated to English).
apiVersion: sealer.aliyun.com/v1alpha1
kind: Cluster
metadata:
  name: hatech-cluster-flannel
spec:
  image: dev-bj.hatech.com.cn/library/kubernetes-flannel:v1.19.9-alpine
  provider: BAREMETAL
  ssh:
    # SSH login password; not needed when key-based login is used
    passwd: hatech1618
    # Absolute path of the SSH private key, e.g. /root/.ssh/id_rsa
    pk: /root/.ssh/id_rsa
    # Passphrase of the private key; use "" when there is none
    pkPasswd: ""
    # SSH login user
    user: root
  network:
    # Network interface to use on each host
    interface: ens192
    # CNI plugin name
    cniName: flannel
    # Must match "Network" in kube-flannel.yml's net-conf.json
    podCIDR: 100.64.0.0/16
    svcCIDR: 10.96.0.0/22
    withoutCNI: false
  certSANS:
    - aliyun-inc.com
    - 10.0.0.2
  hosts:
    - ips:
        - 10.1.120.90
      roles:
        - master
      ssh:
        port: "22"
    - ips:
        - 10.1.120.91
        - 10.1.120.92
        - 10.1.120.93
      roles:
        - node
      ssh:
        port: "22"
- 安装集群
- 见到Succeeded in creating a new cluster, enjoy it!表示安装成功
[root@portal139 flannel]# sealer apply -f Clusterfile
2022-10-31 16:10:47 [INFO] [local.go:288] Start to create a new cluster: master [10.1.120.90], worker [10.1.120.91 10.1.120.92 10.1.120.93]
copying files to 10.1.120.92: 47/47
copying files to 10.1.120.93: 47/47
copying files to 10.1.120.91: 47/47
copying files to 10.1.120.90: 180/180
2022-10-31 16:22:01 [INFO] [kube_certs.go:234] APIserver altNames : {map[apiserver.cluster.local:apiserver.cluster.local kubernetes:kubernetes kubernetes.default:kubernetes.default kubernetes.default.svc:kubernetes.default.svc kubernetes.default.svc.cluster.local:kubernetes.default.svc.cluster.local localhost:localhost master90.hatech.com.cn:master90.hatech.com.cn] map[10.1.120.90:10.1.120.90 10.103.97.2:10.103.97.2 10.96.0.1:10.96.0.1 127.0.0.1:127.0.0.1 172.16.0.181:172.16.0.181]}
2022-10-31 16:22:01 [INFO] [kube_certs.go:254] Etcd altnames : {map[localhost:localhost master90.hatech.com.cn:master90.hatech.com.cn] map[10.1.120.90:10.1.120.90 127.0.0.1:127.0.0.1 ::1:::1]}, commonName : mcopying files to 10.1.120.90: 206/206
2022-10-31 16:22:17 [INFO] [kubeconfig.go:267] [kubeconfig] Writing "admin.conf" kubeconfig file
2022-10-31 16:22:18 [INFO] [kubeconfig.go:267] [kubeconfig] Writing "controller-manager.conf" kubeconfig file
2022-10-31 16:22:18 [INFO] [kubeconfig.go:267] [kubeconfig] Writing "scheduler.conf" kubeconfig file
2022-10-31 16:22:19 [INFO] [kubeconfig.go:267] [kubeconfig] Writing "kubelet.conf" kubeconfig file
++ dirname init-registry.sh
+ cd .
+ REGISTRY_PORT=5000
+ VOLUME=/var/lib/sealer/data/hatech-cluster-flannel/rootfs/registry
+ REGISTRY_DOMAIN=sea.hub
+ container=sealer-registry
+++ pwd
++ dirname /var/lib/sealer/data/hatech-cluster-flannel/rootfs/scripts
+ rootfs=/var/lib/sealer/data/hatech-cluster-flannel/rootfs
+ config=/var/lib/sealer/data/hatech-cluster-flannel/rootfs/etc/registry_config.yml
+ htpasswd=/var/lib/sealer/data/hatech-cluster-flannel/rootfs/etc/registry_htpasswd
+ certs_dir=/var/lib/sealer/data/hatech-cluster-flannel/rootfs/certs
+ image_dir=/var/lib/sealer/data/hatech-cluster-flannel/rootfs/images
+ mkdir -p /var/lib/sealer/data/hatech-cluster-flannel/rootfs/registry
+ load_images
+ for image in '"$image_dir"/*'
+ '[' -f /var/lib/sealer/data/hatech-cluster-flannel/rootfs/images/registry.tar ']'
+ docker load -q -i /var/lib/sealer/data/hatech-cluster-flannel/rootfs/images/registry.tar
Loaded image: registry:2.7.1
++ docker ps -aq -f name=sealer-registry
+ '[' '' ']'
+ regArgs='-d --restart=always --net=host --name sealer-registry -v /var/lib/sealer/data/hatech-cluster-flannel/rootfs/certs:/certs -v /var/lib/sealer/data/hatech-cluster-flannel/rootfs/registry:/var/lib/registry -e REGISTRY_HTTP_TLS_CERTIFICATE=/certs/sea.hub.crt -e REGISTRY_HTTP_TLS_KEY=/certs/sea.hub.key'
+ '[' -f /var/lib/sealer/data/hatech-cluster-flannel/rootfs/etc/registry_config.yml ']'
+ sed -i s/5000/5000/g /var/lib/sealer/data/hatech-cluster-flannel/rootfs/etc/registry_config.yml
+ regArgs='-d --restart=always --net=host --name sealer-registry -v /var/lib/sealer/data/hatech-cluster-flannel/rootfs/certs:/certs -v /var/lib/sealer/data/hatech-cluster-flannel/rootfs/registry:/var/lib/registry -e REGISTRY_HTTP_TLS_CERTIFICATE=/certs/sea.hub.crt -e REGISTRY_HTTP_TLS_KEY=/certs/sea.hub.key -v /var/lib/sealer/data/hatech-cluster-flannel/rootfs/etc/registry_config.yml:/etc/docker/registry/config.yml'
+ '[' -f /var/lib/sealer/data/hatech-cluster-flannel/rootfs/etc/registry_htpasswd ']'
+ docker run -d --restart=always --net=host --name sealer-registry -v /var/lib/sealer/data/hatech-cluster-flannel/rootfs/certs:/certs -v /var/lib/sealer/data/hatech-cluster-flannel/rootfs/registry:/var/lib/registry -e REGISTRY_HTTP_TLS_CERTIFICATE=/certs/sea.hub.crt -e REGISTRY_HTTP_TLS_KEY=/certs/sea.hub.key -v /var/lib/sealer/data/hatech-cluster-flannel/rootfs/etc/registry_config.yml:/etc/docker/registry/config.yml registry:2.7.1
7c0460c2ad0885d48fab9775afa03a483515281e3e24fed333d00d9d77329990
+ check_registry
+ n=1
+ (( n <= 3 ))
++ docker inspect --format '{{json .State.Status}}' sealer-registry
+ registry_status='"running"'
+ [[ "running" == \"running\" ]]
copying files to 10.1.120.90: 210/210
2022-10-31 16:22:23 [INFO] [init.go:251] start to init master0...
2022-10-31 16:24:45 [INFO] [init.go:206] join command is: kubeadm join apiserver.cluster.local:6443 --token 4o0m6p.c3y004qdhtdr41hh \
--discovery-token-ca-cert-hash sha256:d3ddab7edf56c7126cf2388534a3ba32f6f9cf3f7fac28da07209cef855e3cb8 \
copying files to 10.1.120.92: 49/49
copying files to 10.1.120.93: 49/49
copying files to 10.1.120.91: 49/49
2022-10-31 16:24:45 [INFO] [runtime.go:83] [10.1.120.91 10.1.120.92 10.1.120.93] will be added as worker
2022-10-31 16:24:50 [INFO] [init.go:206] join command is: kubeadm join apiserver.cluster.local:6443 --token 6j188v.lcbg2s6kc7b19vkq --discovery-token-ca-cert-hash sha256:d3ddab7edf56c7126cf2388534a3ba32f6f9cf3f7fac28da07209cef855e3cb8
2022-10-31 16:24:50 [INFO] [masters.go:520] join token: 6j188v.lcbg2s6kc7b19vkq hash: sha256:d3ddab7edf56c7126cf2388534a3ba32f6f9cf3f7fac28da07209cef855e3cb8 certifacate key:
2022-10-31 16:24:50 [INFO] [nodes.go:92] Start to join 10.1.120.93 as worker
2022-10-31 16:24:50 [INFO] [nodes.go:92] Start to join 10.1.120.92 as worker
2022-10-31 16:24:50 [INFO] [nodes.go:92] Start to join 10.1.120.91 as worker
...........................................................................
[preflight] Running pre-flight checks
[preflight] Running pre-flight checks
[preflight] Running pre-flight checks
[WARNING IsDockerSystemdCheck]: detected "cgroupfs" as the Docker cgroup driver. The recommended driver is "systemd". Please follow the guide at https://kubernetes.io/docs/setup/cri/
[WARNING FileExisting-socat]: socat not found in system path
[WARNING IsDockerSystemdCheck]: detected "cgroupfs" as the Docker cgroup driver. The recommended driver is "systemd". Please follow the guide at https://kubernetes.io/docs/setup/cri/
[WARNING FileExisting-socat]: socat not found in system path
[WARNING IsDockerSystemdCheck]: detected "cgroupfs" as the Docker cgroup driver. The recommended driver is "systemd". Please follow the guide at https://kubernetes.io/docs/setup/cri/
[WARNING FileExisting-socat]: socat not found in system path
[preflight] Reading configuration from the cluster...
[preflight] FYI: You can look at this config file with 'kubectl -n kube-system get cm kubeadm-config -oyaml'
[preflight] Reading configuration from the cluster...
[preflight] FYI: You can look at this config file with 'kubectl -n kube-system get cm kubeadm-config -oyaml'
[kubelet-start] Writing kubelet configuration to file "/var/lib/kubelet/config.yaml"
[kubelet-start] Writing kubelet environment file with flags to file "/var/lib/kubelet/kubeadm-flags.env"
[kubelet-start] Starting the kubelet
[kubelet-start] Writing kubelet configuration to file "/var/lib/kubelet/config.yaml"
[kubelet-start] Writing kubelet environment file with flags to file "/var/lib/kubelet/kubeadm-flags.env"
[kubelet-start] Starting the kubelet
[preflight] Reading configuration from the cluster...
[preflight] FYI: You can look at this config file with 'kubectl -n kube-system get cm kubeadm-config -oyaml'
[kubelet-start] Writing kubelet configuration to file "/var/lib/kubelet/config.yaml"
[kubelet-start] Writing kubelet environment file with flags to file "/var/lib/kubelet/kubeadm-flags.env"
[kubelet-start] Starting the kubelet
[kubelet-start] Waiting for the kubelet to perform the TLS Bootstrap...
[kubelet-start] Waiting for the kubelet to perform the TLS Bootstrap...
[kubelet-start] Waiting for the kubelet to perform the TLS Bootstrap...
This node has joined the cluster:
* Certificate signing request was sent to apiserver and a response was received.
* The Kubelet was informed of the new secure connection details.
Run 'kubectl get nodes' on the control-plane to see this node join the cluster.
This node has joined the cluster:
* Certificate signing request was sent to apiserver and a response was received.
* The Kubelet was informed of the new secure connection details.
Run 'kubectl get nodes' on the control-plane to see this node join the cluster.
This node has joined the cluster:
* Certificate signing request was sent to apiserver and a response was received.
* The Kubelet was informed of the new secure connection details.
Run 'kubectl get nodes' on the control-plane to see this node join the cluster.
2022-10-31 16:25:18 [INFO] [nodes.go:115] Succeeded in joining 10.1.120.92 as worker
2022-10-31 16:25:18 [INFO] [nodes.go:115] Succeeded in joining 10.1.120.93 as worker
2022-10-31 16:25:19 [INFO] [nodes.go:115] Succeeded in joining 10.1.120.91 as worker
podsecuritypolicy.policy/psp.flannel.unprivileged created
clusterrole.rbac.authorization.k8s.io/flannel created
clusterrolebinding.rbac.authorization.k8s.io/flannel created
serviceaccount/flannel created
configmap/kube-flannel-cfg created
daemonset.apps/kube-flannel-ds created
2022-10-31 16:25:19 [INFO] [local.go:298] Succeeded in creating a new cluster, enjoy it!
- 报错解决
- [WARNING IsDockerSystemdCheck]:detected "cgroupfs" as the Docker cgroup driver. The recommended driver is "systemd". Please follow the guide at
- 参见:2-sealer-安装kubernetes集群
- 查看集群安装情况
[root@portal139 flannel]# kubectl get nodes -o wide
NAME STATUS ROLES AGE VERSION INTERNAL-IP EXTERNAL-IP OS-IMAGE KERNEL-VERSION CONTAINER-RUNTIME
master90.hatech.com.cn Ready master 155m v1.19.9 10.1.120.90 <none> CentOS Linux 7 (Core) 5.4.220-1.el7.elrepo.x86_64 docker://19.3.14
slave91.hatech.com.cn Ready <none> 154m v1.19.9 10.1.120.91 <none> CentOS Linux 7 (Core) 5.4.220-1.el7.elrepo.x86_64 docker://19.3.14
slave92.hatech.com.cn Ready <none> 154m v1.19.9 10.1.120.92 <none> CentOS Linux 7 (Core) 5.4.220-1.el7.elrepo.x86_64 docker://19.3.14
slave93.hatech.com.cn Ready <none> 154m v1.19.9 10.1.120.93 <none> CentOS Linux 7 (Core) 5.4.220-1.el7.elrepo.x86_64 docker://19.3.14
[root@portal139 flannel]#
[root@portal139 flannel]# kubectl get ns
NAME STATUS AGE
default Active 155m
istorm-one-base Active 48m
kube-node-lease Active 155m
kube-public Active 155m
kube-system Active 155m
- 查看集群安装网络插件情况
- 4个节点都装上了 flannel CNI插件
[root@portal139 flannel]# kubectl -n kube-system get pod -owide
NAME READY STATUS RESTARTS AGE IP NODE NOMINATED NODE READINESS GATES
coredns-55bcc669d7-7sqq8 1/1 Running 2 156m 100.64.0.9 master90.hatech.com.cn <none> <none>
coredns-55bcc669d7-952w8 1/1 Running 2 156m 100.64.0.8 master90.hatech.com.cn <none> <none>
etcd-master90.hatech.com.cn 1/1 Running 3 156m 10.1.120.90 master90.hatech.com.cn <none> <none>
kube-apiserver-master90.hatech.com.cn 1/1 Running 4 156m 10.1.120.90 master90.hatech.com.cn <none> <none>
kube-controller-manager-master90.hatech.com.cn 1/1 Running 3 156m 10.1.120.90 master90.hatech.com.cn <none> <none>
kube-flannel-ds-8nd89 1/1 Running 2 155m 10.1.120.92 slave92.hatech.com.cn <none> <none>
kube-flannel-ds-ghtlf 1/1 Running 2 155m 10.1.120.93 slave93.hatech.com.cn <none> <none>
kube-flannel-ds-lzkk7 1/1 Running 4 155m 10.1.120.90 master90.hatech.com.cn <none> <none>
kube-flannel-ds-mtvd5 1/1 Running 2 155m 10.1.120.91 slave91.hatech.com.cn <none> <none>
kube-lvscare-slave91.hatech.com.cn 1/1 Running 2 155m 10.1.120.91 slave91.hatech.com.cn <none> <none>
kube-lvscare-slave92.hatech.com.cn 1/1 Running 2 155m 10.1.120.92 slave92.hatech.com.cn <none> <none>
kube-lvscare-slave93.hatech.com.cn 1/1 Running 2 155m 10.1.120.93 slave93.hatech.com.cn <none> <none>
kube-proxy-2bkjl 1/1 Running 2 155m 10.1.120.91 slave91.hatech.com.cn <none> <none>
kube-proxy-8v4cr 1/1 Running 2 155m 10.1.120.93 slave93.hatech.com.cn <none> <none>
kube-proxy-kkfsl 1/1 Running 2 155m 10.1.120.92 slave92.hatech.com.cn <none> <none>
kube-proxy-qkwxw 1/1 Running 3 156m 10.1.120.90 master90.hatech.com.cn <none> <none>
kube-scheduler-master90.hatech.com.cn 1/1 Running 3 156m 10.1.120.90 master90.hatech.com.cn <none> <none>
[root@portal139 flannel]#