I. Environment Preparation
1. System requirements
1) 2 GB or more of RAM per machine (less leaves little room for your applications)
2) 2 or more CPU cores
3) Full network connectivity between all machines in the cluster (a public or private network is fine). Hostnames, MAC addresses, and product_uuid values must be unique across nodes.
2. /etc/hosts configuration
Configure on all nodes; the control-plane node is named k8s-master and the worker node k8s-node:
192.168.137.54 k8s-master
192.168.137.60 k8s-node
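For example, you can append both entries on every node in one step (a sketch; adjust the IPs to your environment):
cat >> /etc/hosts <<EOF
192.168.137.54 k8s-master
192.168.137.60 k8s-node
EOF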
3. Hostname configuration
On the control-plane node, run:
hostnamectl --static set-hostname k8s-master
On the worker node, run:
hostnamectl --static set-hostname k8s-node
4. Disable the firewall
Stop firewalld and disable it permanently:
systemctl stop firewalld && systemctl disable firewalld
5. Disable swap
Comment out the swap entry in /etc/fstab (persistent across reboots) and turn swap off immediately:
sed -i '/ swap / s/^/#/' /etc/fstab
swapoff -a
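To verify that swap is off (the Swap line should show all zeros):
free -h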
6. Disable SELinux
setenforce 0 switches to permissive mode immediately; the config edit below makes the change persist after a reboot:
setenforce 0
sed -i 's/^SELINUX=enforcing$/SELINUX=disabled/' /etc/selinux/config
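To check the current SELinux mode (it should report Permissive now, and Disabled after a reboot):
getenforce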
7. Known RHEL/CentOS 7 routing issue
Some RHEL/CentOS 7 users have reported traffic being routed incorrectly because iptables was bypassed. Make sure net.bridge.bridge-nf-call-iptables is set to 1 in your sysctl configuration. Load the br_netfilter module first, otherwise the bridge settings cannot be applied:
modprobe br_netfilter
cat <<EOF > /etc/sysctl.d/k8s.conf
net.bridge.bridge-nf-call-ip6tables = 1
net.bridge.bridge-nf-call-iptables = 1
EOF
sysctl --system
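To persist the module load across reboots and verify the setting took effect (an optional sketch, using the standard systemd modules-load.d mechanism):
echo br_netfilter > /etc/modules-load.d/k8s.conf
sysctl net.bridge.bridge-nf-call-iptables
The second command should print net.bridge.bridge-nf-call-iptables = 1.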
8. Configure the Kubernetes yum repository
Configure on all nodes:
cat <<EOF > /etc/yum.repos.d/kubernetes.repo
[kubernetes]
name=Kubernetes
baseurl=https://mirrors.aliyun.com/kubernetes/yum/repos/kubernetes-el7-x86_64/
enabled=1
gpgcheck=0
repo_gpgcheck=1
gpgkey=https://mirrors.aliyun.com/kubernetes/yum/doc/yum-key.gpg https://mirrors.aliyun.com/kubernetes/yum/doc/rpm-package-key.gpg
EOF
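To confirm the repository is visible to yum, you can run, for example:
yum repolist enabled | grep kubernetes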
II. Install Docker CE
1. Install required system utilities
sudo yum install -y yum-utils device-mapper-persistent-data lvm2
2. Add the repository
sudo yum-config-manager --add-repo https://mirrors.aliyun.com/docker-ce/linux/centos/docker-ce.repo
3. Refresh the package cache
sudo yum makecache fast
4. Install Docker CE
List the available Docker CE versions:
yum list docker-ce --showduplicates | sort -r
[root@localhost ~]# yum list docker-ce --showduplicates | sort -r
* updates: mirror.bit.edu.cn
Loading mirror speeds from cached hostfile
Loaded plugins: fastestmirror
* extras: mirror.bit.edu.cn
docker-ce.x86_64 3:19.03.9-3.el7 docker-ce-stable
docker-ce.x86_64 3:19.03.8-3.el7 docker-ce-stable
docker-ce.x86_64 3:19.03.7-3.el7 docker-ce-stable
docker-ce.x86_64 3:19.03.6-3.el7 docker-ce-stable
docker-ce.x86_64 3:19.03.5-3.el7 docker-ce-stable
docker-ce.x86_64 3:19.03.4-3.el7 docker-ce-stable
docker-ce.x86_64 3:19.03.3-3.el7 docker-ce-stable
docker-ce.x86_64 3:19.03.2-3.el7 docker-ce-stable
docker-ce.x86_64 3:19.03.1-3.el7 docker-ce-stable
docker-ce.x86_64 3:19.03.13-3.el7 docker-ce-stable
docker-ce.x86_64 3:19.03.12-3.el7 docker-ce-stable
docker-ce.x86_64 3:19.03.11-3.el7 docker-ce-stable
docker-ce.x86_64 3:19.03.10-3.el7 docker-ce-stable
docker-ce.x86_64 3:19.03.0-3.el7 docker-ce-stable
docker-ce.x86_64 3:18.09.9-3.el7 docker-ce-stable
docker-ce.x86_64 3:18.09.8-3.el7 docker-ce-stable
docker-ce.x86_64 3:18.09.7-3.el7 docker-ce-stable
docker-ce.x86_64 3:18.09.6-3.el7 docker-ce-stable
docker-ce.x86_64 3:18.09.5-3.el7 docker-ce-stable
docker-ce.x86_64 3:18.09.4-3.el7 docker-ce-stable
docker-ce.x86_64 3:18.09.3-3.el7 docker-ce-stable
docker-ce.x86_64 3:18.09.2-3.el7 docker-ce-stable
docker-ce.x86_64 3:18.09.1-3.el7 docker-ce-stable
docker-ce.x86_64 3:18.09.0-3.el7 docker-ce-stable
docker-ce.x86_64 18.06.3.ce-3.el7 docker-ce-stable
docker-ce.x86_64 18.06.2.ce-3.el7 docker-ce-stable
docker-ce.x86_64 18.06.1.ce-3.el7 docker-ce-stable
docker-ce.x86_64 18.06.0.ce-3.el7 docker-ce-stable
docker-ce.x86_64 18.03.1.ce-1.el7.centos docker-ce-stable
docker-ce.x86_64 18.03.0.ce-1.el7.centos docker-ce-stable
docker-ce.x86_64 17.12.1.ce-1.el7.centos docker-ce-stable
docker-ce.x86_64 17.12.0.ce-1.el7.centos docker-ce-stable
docker-ce.x86_64 17.09.1.ce-1.el7.centos docker-ce-stable
docker-ce.x86_64 17.09.0.ce-1.el7.centos docker-ce-stable
docker-ce.x86_64 17.06.2.ce-1.el7.centos docker-ce-stable
docker-ce.x86_64 17.06.1.ce-1.el7.centos docker-ce-stable
docker-ce.x86_64 17.06.0.ce-1.el7.centos docker-ce-stable
docker-ce.x86_64 17.03.3.ce-1.el7 docker-ce-stable
docker-ce.x86_64 17.03.2.ce-1.el7.centos docker-ce-stable
docker-ce.x86_64 17.03.1.ce-1.el7.centos docker-ce-stable
docker-ce.x86_64 17.03.0.ce-1.el7.centos docker-ce-stable
* base: mirrors.huaweicloud.com
Available Packages
Install version 18.06.3.ce; the general form is sudo yum -y install docker-ce-[VERSION]
sudo yum -y install docker-ce-18.06.3.ce
5. Configure the registry mirror and cgroup driver
mkdir -p /etc/docker
cat << EOF > /etc/docker/daemon.json
{
  "registry-mirrors": ["https://vm2kvnvb.mirror.aliyuncs.com"],
  "exec-opts": ["native.cgroupdriver=systemd"],
  "log-driver": "json-file",
  "log-opts": {
    "max-size": "100m"
  },
  "storage-driver": "overlay2",
  "storage-opts": [
    "overlay2.override_kernel_check=true"
  ]
}
EOF
sudo systemctl daemon-reload
sudo systemctl restart docker
Explanation:
"registry-mirrors": configures the Aliyun registry mirror; replace the URL with your own accelerator address
6. Enable and start the Docker service
systemctl enable docker && systemctl start docker
7. Check the version
[root@localhost ~]# docker version
Client:
 Version:           18.06.3-ce
 API version:       1.38
 Go version:        go1.10.3
 Git commit:        d7080c1
 Built:             Wed Feb 20 02:26:51 2019
 OS/Arch:           linux/amd64
 Experimental:      false

Server:
 Engine:
  Version:          18.06.3-ce
  API version:      1.38 (minimum version 1.12)
  Go version:       go1.10.3
  Git commit:       d7080c1
  Built:            Wed Feb 20 02:28:17 2019
  OS/Arch:          linux/amd64
  Experimental:     false
III. Install kubeadm, kubelet, and kubectl
Run on all nodes.
1. List available versions
[root@k8s-master ~]# yum list kubeadm --showduplicates | sort
Available Packages
* base: mirrors.huaweicloud.com
* extras: mirror.bit.edu.cn
Installed Packages
kubeadm.x86_64 1.10.0-0 kubernetes
kubeadm.x86_64 1.10.10-0 kubernetes
kubeadm.x86_64 1.10.1-0 kubernetes
kubeadm.x86_64 1.10.11-0 kubernetes
kubeadm.x86_64 1.10.12-0 kubernetes
...
kubeadm.x86_64 1.17.13-0 kubernetes
kubeadm.x86_64 1.17.2-0 kubernetes
kubeadm.x86_64 1.17.3-0 kubernetes
kubeadm.x86_64 1.17.4-0 kubernetes
kubeadm.x86_64 1.17.5-0 kubernetes
kubeadm.x86_64 1.17.6-0 kubernetes
kubeadm.x86_64 1.17.7-0 kubernetes
kubeadm.x86_64 1.17.7-1 kubernetes
kubeadm.x86_64 1.17.8-0 kubernetes
kubeadm.x86_64 1.17.9-0 kubernetes
kubeadm.x86_64 1.18.0-0 kubernetes
kubeadm.x86_64 1.18.10-0 kubernetes
kubeadm.x86_64 1.18.1-0 kubernetes
kubeadm.x86_64 1.18.2-0 kubernetes
kubeadm.x86_64 1.18.3-0 kubernetes
kubeadm.x86_64 1.18.4-0 kubernetes
kubeadm.x86_64 1.18.4-1 kubernetes
kubeadm.x86_64 1.18.5-0 kubernetes
kubeadm.x86_64 1.18.6-0 @kubernetes
kubeadm.x86_64 1.18.6-0 kubernetes
kubeadm.x86_64 1.18.8-0 kubernetes
kubeadm.x86_64 1.18.9-0 kubernetes
kubeadm.x86_64 1.19.0-0 kubernetes
kubeadm.x86_64 1.19.1-0 kubernetes
kubeadm.x86_64 1.19.2-0 kubernetes
kubeadm.x86_64 1.19.3-0 kubernetes
kubeadm.x86_64 1.6.0-0 kubernetes
kubeadm.x86_64 1.6.10-0 kubernetes
kubeadm.x86_64 1.6.1-0 kubernetes
...
Loaded plugins: fastestmirror
Loading mirror speeds from cached hostfile
* updates: mirror.bit.edu.cn
2. Install a specific version
This guide installs 1.18.6; any other version can be chosen instead. The general form is kubeadm-<VERSION>
yum install -y kubeadm-1.18.6 kubelet-1.18.6 kubectl-1.18.6 --disableexcludes=kubernetes
Enable kubelet at boot (it will crash-loop until kubeadm init supplies its configuration; this is expected):
systemctl enable kubelet && systemctl start kubelet
3. Initialize the control-plane node
kubeadm init --kubernetes-version 1.18.6 --control-plane-endpoint 192.168.137.54 --image-repository registry.cn-hangzhou.aliyuncs.com/k8s-dkr --pod-network-cidr=10.244.0.0/16
Notes:
--kubernetes-version: the Kubernetes version to install
--control-plane-endpoint: a stable endpoint for the control plane
--image-repository: the registry that holds the required Kubernetes component images. If it is a private registry, log in first and then initialize.
--pod-network-cidr: the IP address range for the pod network. When set, the control plane automatically assigns a CIDR to each node. The value here matches the flannel network plugin; for other plugins, use their documented value.
Note: in practice you can also initialize from a configuration file (a sample file is sketched after these steps):
1) Generate a configuration file
kubeadm config print init-defaults > kubeadm.yml
2) List the required images
kubeadm config images list --config kubeadm.yml
3) Pull the images in advance
kubeadm config images pull --config kubeadm.yml
4) Initialize
kubeadm init --config=kubeadm.yml
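For reference, a minimal kubeadm.yml equivalent to the flag-based command above might look like the following (a sketch against the v1beta2 config schema used by Kubernetes 1.18; compare it with the generated defaults before relying on it):
apiVersion: kubeadm.k8s.io/v1beta2
kind: ClusterConfiguration
kubernetesVersion: v1.18.6
controlPlaneEndpoint: "192.168.137.54:6443"
imageRepository: registry.cn-hangzhou.aliyuncs.com/k8s-dkr
networking:
  podSubnet: "10.244.0.0/16"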
4. Successful initialization
On success, the output ends with information like the following:
Your Kubernetes control-plane has initialized successfully!
To start using your cluster, you need to run the following as a regular user:
mkdir -p $HOME/.kube
sudo cp -i /etc/kubernetes/admin.conf $HOME/.kube/config
sudo chown $(id -u):$(id -g) $HOME/.kube/config
You should now deploy a pod network to the cluster.
Run "kubectl apply -f [podnetwork].yaml" with one of the options listed at:
https://kubernetes.io/docs/concepts/cluster-administration/addons/
You can now join any number of control-plane nodes by copying certificate authorities
and service account keys on each node and then running the following as root:
kubeadm join 192.168.137.54:6443 --token fkvcaq.28l267ky0xki97xf \
--discovery-token-ca-cert-hash sha256:fbbba2711e8eecd7726a4de9571d70089813f9b89983b97496aca88f7242aa98 \
--control-plane
Then you can join any number of worker nodes by running the following on each as root:
kubeadm join 192.168.137.54:6443 --token fkvcaq.28l267ky0xki97xf \
--discovery-token-ca-cert-hash sha256:fbbba2711e8eecd7726a4de9571d70089813f9b89983b97496aca88f7242aa98
Following the guidance in the output, run:
mkdir -p $HOME/.kube
sudo cp -i /etc/kubernetes/admin.conf $HOME/.kube/config
sudo chown $(id -u):$(id -g) $HOME/.kube/config
Install a network plugin; this guide uses flannel:
kubectl apply -f https://raw.githubusercontent.com/coreos/flannel/master/Documentation/kube-flannel.yml
On every worker node, run the last two lines of the init output to join the cluster:
kubeadm join 192.168.137.54:6443 --token fkvcaq.28l267ky0xki97xf \
--discovery-token-ca-cert-hash sha256:fbbba2711e8eecd7726a4de9571d70089813f9b89983b97496aca88f7242aa98
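Back on the control-plane node, you can watch the nodes join and become Ready (once flannel is running):
kubectl get nodes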
If you lose the token or it has expired, generate a new join command with:
kubeadm token create --print-join-command
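Existing tokens and their expiry times can be inspected with:
kubeadm token list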
5. Initialization errors
After fixing an initialization error, run kubeadm reset to return to the pre-init state, then initialize again.
1) cgroup error
[WARNING IsDockerSystemdCheck]: detected "cgroupfs" as the Docker cgroup driver. The recommended driver is "systemd".
Please follow the guide at https://kubernetes.io/docs/setup/cri/
Change the Docker cgroup driver (note that this overwrites /etc/docker/daemon.json; merge it with any settings configured earlier, such as the registry mirror):
cat <<EOF > /etc/docker/daemon.json
{
  "exec-opts": ["native.cgroupdriver=systemd"]
}
EOF
Restart Docker:
systemctl restart docker
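To confirm the new driver is active:
docker info | grep -i "cgroup driver"
This should print: Cgroup Driver: systemd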
6. kubectl command errors
Error:
k8s The connection to the server 192.168.x.x:6443 was refused - did you specify the right host or port?
Likely cause: the following post-init steps were not executed:
mkdir -p $HOME/.kube
sudo cp -i /etc/kubernetes/admin.conf $HOME/.kube/config
sudo chown $(id -u):$(id -g) $HOME/.kube/config
If a worker node reports this error, copy /etc/kubernetes/admin.conf from the control-plane node to the same path on the worker, then run the commands above there.
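For example (a sketch, assuming root SSH access from the control-plane node to the worker):
scp /etc/kubernetes/admin.conf root@k8s-node:/etc/kubernetes/admin.conf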
7. flannel installation errors
Installing flannel directly with the following command may fail; the root cause is usually that the flannel image cannot be pulled:
kubectl apply -f https://raw.githubusercontent.com/coreos/flannel/master/Documentation/kube-flannel.yml
Fix:
Download kube-flannel.yml:
curl -o kube-flannel.yml https://raw.githubusercontent.com/coreos/flannel/master/Documentation/kube-flannel.yml
Find a registry from which the matching flannel version can be pulled, and edit the image references in the file to point at it:
that is, change quay.io/coreos/flannel:v0.13.0 to a pullable image address,
for example:
registry.cn-hangzhou.aliyuncs.com/k8s-dkr/flannel:v0.13.0
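One way to update both occurrences in place (a sketch; substitute whatever mirror you can actually pull from):
sed -i 's#quay.io/coreos/flannel:v0.13.0#registry.cn-hangzhou.aliyuncs.com/k8s-dkr/flannel:v0.13.0#g' kube-flannel.yml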
Note: if the replacement registry is private, run docker login before kubectl apply -f.
The original flannel manifest follows; the image reference appears in two places (the install-cni initContainer and the kube-flannel container), and both must be changed:
---
apiVersion: policy/v1beta1
kind: PodSecurityPolicy
metadata:
  name: psp.flannel.unprivileged
  annotations:
    seccomp.security.alpha.kubernetes.io/allowedProfileNames: docker/default
    seccomp.security.alpha.kubernetes.io/defaultProfileName: docker/default
    apparmor.security.beta.kubernetes.io/allowedProfileNames: runtime/default
    apparmor.security.beta.kubernetes.io/defaultProfileName: runtime/default
spec:
  privileged: false
  volumes:
  - configMap
  - secret
  - emptyDir
  - hostPath
  allowedHostPaths:
  - pathPrefix: "/etc/cni/net.d"
  - pathPrefix: "/etc/kube-flannel"
  - pathPrefix: "/run/flannel"
  readOnlyRootFilesystem: false
  # Users and groups
  runAsUser:
    rule: RunAsAny
  supplementalGroups:
    rule: RunAsAny
  fsGroup:
    rule: RunAsAny
  # Privilege Escalation
  allowPrivilegeEscalation: false
  defaultAllowPrivilegeEscalation: false
  # Capabilities
  allowedCapabilities: ['NET_ADMIN', 'NET_RAW']
  defaultAddCapabilities: []
  requiredDropCapabilities: []
  # Host namespaces
  hostPID: false
  hostIPC: false
  hostNetwork: true
  hostPorts:
  - min: 0
    max: 65535
  # SELinux
  seLinux:
    # SELinux is unused in CaaSP
    rule: 'RunAsAny'
---
kind: ClusterRole
apiVersion: rbac.authorization.k8s.io/v1
metadata:
  name: flannel
rules:
- apiGroups: ['extensions']
  resources: ['podsecuritypolicies']
  verbs: ['use']
  resourceNames: ['psp.flannel.unprivileged']
- apiGroups:
  - ""
  resources:
  - pods
  verbs:
  - get
- apiGroups:
  - ""
  resources:
  - nodes
  verbs:
  - list
  - watch
- apiGroups:
  - ""
  resources:
  - nodes/status
  verbs:
  - patch
---
kind: ClusterRoleBinding
apiVersion: rbac.authorization.k8s.io/v1
metadata:
  name: flannel
roleRef:
  apiGroup: rbac.authorization.k8s.io
  kind: ClusterRole
  name: flannel
subjects:
- kind: ServiceAccount
  name: flannel
  namespace: kube-system
---
apiVersion: v1
kind: ServiceAccount
metadata:
  name: flannel
  namespace: kube-system
---
kind: ConfigMap
apiVersion: v1
metadata:
  name: kube-flannel-cfg
  namespace: kube-system
  labels:
    tier: node
    app: flannel
data:
  cni-conf.json: |
    {
      "name": "cbr0",
      "cniVersion": "0.3.1",
      "plugins": [
        {
          "type": "flannel",
          "delegate": {
            "hairpinMode": true,
            "isDefaultGateway": true
          }
        },
        {
          "type": "portmap",
          "capabilities": {
            "portMappings": true
          }
        }
      ]
    }
  net-conf.json: |
    {
      "Network": "10.244.0.0/16",
      "Backend": {
        "Type": "vxlan"
      }
    }
---
apiVersion: apps/v1
kind: DaemonSet
metadata:
  name: kube-flannel-ds
  namespace: kube-system
  labels:
    tier: node
    app: flannel
spec:
  selector:
    matchLabels:
      app: flannel
  template:
    metadata:
      labels:
        tier: node
        app: flannel
    spec:
      affinity:
        nodeAffinity:
          requiredDuringSchedulingIgnoredDuringExecution:
            nodeSelectorTerms:
            - matchExpressions:
              - key: kubernetes.io/os
                operator: In
                values:
                - linux
      hostNetwork: true
      priorityClassName: system-node-critical
      tolerations:
      - operator: Exists
        effect: NoSchedule
      serviceAccountName: flannel
      initContainers:
      - name: install-cni
        image: quay.io/coreos/flannel:v0.13.0
        command:
        - cp
        args:
        - -f
        - /etc/kube-flannel/cni-conf.json
        - /etc/cni/net.d/10-flannel.conflist
        volumeMounts:
        - name: cni
          mountPath: /etc/cni/net.d
        - name: flannel-cfg
          mountPath: /etc/kube-flannel/
      containers:
      - name: kube-flannel
        image: quay.io/coreos/flannel:v0.13.0
        command:
        - /opt/bin/flanneld
        args:
        - --ip-masq
        - --kube-subnet-mgr
        resources:
          requests:
            cpu: "100m"
            memory: "50Mi"
          limits:
            cpu: "100m"
            memory: "50Mi"
        securityContext:
          privileged: false
          capabilities:
            add: ["NET_ADMIN", "NET_RAW"]
        env:
        - name: POD_NAME
          valueFrom:
            fieldRef:
              fieldPath: metadata.name
        - name: POD_NAMESPACE
          valueFrom:
            fieldRef:
              fieldPath: metadata.namespace
        volumeMounts:
        - name: run
          mountPath: /run/flannel
        - name: flannel-cfg
          mountPath: /etc/kube-flannel/
      volumes:
      - name: run
        hostPath:
          path: /run/flannel
      - name: cni
        hostPath:
          path: /etc/cni/net.d
      - name: flannel-cfg
        configMap:
          name: kube-flannel-cfg
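After applying the edited manifest, you can check that the flannel DaemonSet pods come up on every node (the manifest labels them app=flannel):
kubectl get pods -n kube-system -l app=flannel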
IV. Final Result
All pods should be in the Running state:
[root@k8s-master kubernetes]# kubectl get pod --all-namespaces -o wide
NAMESPACE NAME READY STATUS RESTARTS AGE IP NODE NOMINATED NODE READINESS GATES
kube-system coredns-7b5f7fc94c-87xlt 1/1 Running 0 63m 10.244.0.3 k8s-master <none> <none>
kube-system coredns-7b5f7fc94c-bfp8s 1/1 Running 0 63m 10.244.0.2 k8s-master <none> <none>
kube-system etcd-k8s-master 1/1 Running 1 63m 192.168.137.108 k8s-master <none> <none>
kube-system kube-apiserver-k8s-master 1/1 Running 1 63m 192.168.137.108 k8s-master <none> <none>
kube-system kube-controller-manager-k8s-master 1/1 Running 1 63m 192.168.137.108 k8s-master <none> <none>
kube-system kube-flannel-ds-hqj44 1/1 Running 0 29m 192.168.137.108 k8s-master <none> <none>
kube-system kube-flannel-ds-z65bn 1/1 Running 0 20m 192.168.137.150 k8s-node <none> <none>
kube-system kube-proxy-25wtg 1/1 Running 1 63m 192.168.137.108 k8s-master <none> <none>
kube-system kube-proxy-jh4j8 1/1 Running 0 20m 192.168.137.150 k8s-node <none> <none>
kube-system kube-scheduler-k8s-master 1/1 Running 1 63m 192.168.137.108 k8s-master <none> <none>
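As a final check, both nodes should report Ready:
kubectl get nodes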