System preparation
Check the OS version (all nodes)
[root@master]# cat /etc/centos-release
CentOS Linux release 7.5.1804 (Core)
Configure the network (all nodes)
[root@master ~]# cat /etc/sysconfig/network-scripts/ifcfg-ens33
TYPE=Ethernet
PROXY_METHOD=none
BROWSER_ONLY=no
BOOTPROTO=static
NM_CONTROLLED=no
DEFROUTE=yes
IPV4_FAILURE_FATAL=no
IPV6INIT=yes
IPV6_AUTOCONF=yes
IPV6_DEFROUTE=yes
IPV6_FAILURE_FATAL=no
IPV6_ADDR_GEN_MODE=stable-privacy
NAME=ens33
UUID=1c90a9de-f52a-44cf-b755-df1eacdef4c3
DEVICE=ens33
ONBOOT=yes
IPADDR=192.168.101.200
GATEWAY=192.168.101.193
DNS1=192.168.101.193
DNS2=202.103.24.68
NETMASK=255.255.255.0
Add the Aliyun yum repository (all nodes)
[root@master ~]# rm -rfv /etc/yum.repos.d/*
[root@master ~]# curl -o /etc/yum.repos.d/CentOS-Base.repo http://mirrors.aliyun.com/repo/Centos-7.repo
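After swapping out the repo files it is worth rebuilding the yum cache; an optional but common follow-up, using standard yum subcommands:
[root@master ~]# yum clean all
[root@master ~]# yum makecache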
Configure the hostnames and distribute the hosts file to all nodes
[root@master ~]# cat /etc/hosts
127.0.0.1 localhost localhost.localdomain localhost4 localhost4.localdomain4
::1 localhost localhost.localdomain localhost6 localhost6.localdomain6
192.168.101.200 master
192.168.101.201 node01
192.168.101.202 node02
Set the hostname on each host
# nmcli g hostname master
Set up passwordless SSH (running this on the master is enough)
# ssh-keygen
# ssh-copy-id -i /root/.ssh/id_rsa.pub node01
# ssh-copy-id -i /root/.ssh/id_rsa.pub node02
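With key-based login in place, the hosts file above can be distributed and each node's hostname set in one loop; a minimal sketch, assuming the node names from /etc/hosts:
# for h in node01 node02; do scp /etc/hosts root@$h:/etc/hosts; ssh root@$h "nmcli g hostname $h"; done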
Disable swap and comment out the swap entry in /etc/fstab (all nodes)
[root@master ~]# swapoff -a
[root@master ~]# cat /etc/fstab
#
# /etc/fstab
# Created by anaconda on Mon Aug 20 17:52:24 2018
#
# Accessible filesystems, by reference, are maintained under '/dev/disk'
# See man pages fstab(5), findfs(8), mount(8) and/or blkid(8) for more info
#
/dev/mapper/centos-root / xfs defaults 0 0
UUID=aa102231-61d2-47d7-9108-5c5f91f780a5 /boot xfs defaults 0 0
# /dev/mapper/centos-swap swap swap defaults 0 0
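Commenting the swap entry by hand is fine; if you prefer a one-liner to run on every node, a sed like the following comments out any line mentioning swap (back up /etc/fstab first):
# sed -ri 's/.*swap.*/#&/' /etc/fstab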
Configure kernel parameters so bridged IPv4 traffic is passed to the iptables chains (all nodes)
[root@master ~]# cat > /etc/sysctl.d/k8s.conf <<EOF
net.bridge.bridge-nf-call-ip6tables = 1
net.bridge.bridge-nf-call-iptables = 1
EOF
# echo 1 > /proc/sys/net/ipv4/ip_forward
# sysctl --system
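If sysctl --system complains that the net.bridge.* keys do not exist, the br_netfilter module is probably not loaded; loading it now and persisting it via modules-load.d is a common companion step. Note also that the echo into ip_forward above does not survive a reboot; adding net.ipv4.ip_forward = 1 to /etc/sysctl.d/k8s.conf does.
# modprobe br_netfilter
# echo br_netfilter > /etc/modules-load.d/br_netfilter.conf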
Install common packages
All nodes
[root@master ~]# yum install vim bash-completion net-tools gcc -y
Install docker-ce from the Aliyun repository
All nodes
[root@master ~]# yum install -y yum-utils device-mapper-persistent-data lvm2
[root@master ~]# yum-config-manager --add-repo https://mirrors.aliyun.com/docker-ce/linux/centos/docker-ce.repo
[root@master ~]# yum -y install docker-ce
If installing docker-ce fails with an error like the following:
[root@master ~]# yum -y install docker-ce
CentOS-8 - Base - mirrors.aliyun.com 14 kB/s | 3.8 kB 00:00
CentOS-8 - Extras - mirrors.aliyun.com 6.4 kB/s | 1.5 kB 00:00
CentOS-8 - AppStream - mirrors.aliyun.com 16 kB/s | 4.3 kB 00:00
Docker CE Stable - x86_64 40 kB/s | 22 kB 00:00
Error:
Problem: package docker-ce-3:19.03.8-3.el7.x86_64 requires containerd.io >= 1.2.2-3, but none of the providers can be installed
- cannot install the best candidate for the job
- package containerd.io-1.2.10-3.2.el7.x86_64 is excluded
- package containerd.io-1.2.13-3.1.el7.x86_64 is excluded
- package containerd.io-1.2.2-3.3.el7.x86_64 is excluded
- package containerd.io-1.2.2-3.el7.x86_64 is excluded
- package containerd.io-1.2.4-3.1.el7.x86_64 is excluded
- package containerd.io-1.2.5-3.1.el7.x86_64 is excluded
- package containerd.io-1.2.6-3.3.el7.x86_64 is excluded
(try to add '--skip-broken' to skip uninstallable packages or '--nobest' to use not only best candidate packages)
Workaround:
[root@master ~]# wget https://download.docker.com/linux/centos/7/x86_64/edge/Packages/containerd.io-1.2.6-3.3.el7.x86_64.rpm
[root@master ~]# yum install containerd.io-1.2.6-3.3.el7.x86_64.rpm
Then install docker-ce again and it will succeed.
Add an Aliyun Docker registry mirror (accelerator)
[root@master ~]# mkdir -p /etc/docker
[root@master ~]# tee /etc/docker/daemon.json <<-'EOF'
{
"registry-mirrors": ["你自己的仓库加速地址"]
}
EOF
[root@master ~]# systemctl daemon-reload
[root@master ~]# systemctl restart docker
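Two quick checks are worth doing here (plain docker and systemctl commands, nothing specific to this setup): make sure Docker starts on boot, and confirm the mirror was picked up.
[root@master ~]# systemctl enable docker
[root@master ~]# docker info | grep -A1 'Registry Mirrors'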
Install kubectl, kubelet and kubeadm
All nodes
Add the Aliyun Kubernetes repository
[root@master ~]# cat <<EOF > /etc/yum.repos.d/kubernetes.repo
[kubernetes]
name=Kubernetes
baseurl=https://mirrors.aliyun.com/kubernetes/yum/repos/kubernetes-el7-x86_64/
enabled=1
gpgcheck=1
repo_gpgcheck=1
gpgkey=https://mirrors.aliyun.com/kubernetes/yum/doc/yum-key.gpg https://mirrors.aliyun.com/kubernetes/yum/doc/rpm-package-key.gpg
EOF
Install:
[root@master ~]# yum install kubectl kubelet kubeadm
[root@master ~]# systemctl enable kubelet
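The bare yum install above pulls whatever 1.18.x is newest in the repo at the time; since the init below pins --kubernetes-version=1.18.0, you may want to pin the packages to match (the node listings later in this article show v1.18.5 for exactly this reason). A hedged example, assuming those package versions are still in the repo:
[root@master ~]# yum install -y kubelet-1.18.0 kubeadm-1.18.0 kubectl-1.18.0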
Initialize the k8s cluster
Master node only
[root@master ~]# kubeadm init --kubernetes-version=1.18.0 \
--apiserver-advertise-address=192.168.101.200 \
--image-repository registry.aliyuncs.com/google_containers \
--service-cidr=10.10.0.0/16 --pod-network-cidr=10.122.0.0/16
The pod network is 10.122.0.0/16, and the apiserver advertise address is the master's own IP.
This step is critical: by default kubeadm pulls its images from k8s.gcr.io, which is unreachable from mainland China, so --image-repository is used to point at the Aliyun registry instead.
The first initialization is noticeably slow because the images have to be pulled, and there is little feedback; it will sit at
[preflight] You can also perform this action in beforehand using 'kubeadm config images pull'
for a while, so be patient.
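To get the pull out of the way beforehand, with visible progress, the images can be fetched separately using the same flags as the init:
[root@master ~]# kubeadm config images pull --kubernetes-version=1.18.0 --image-repository registry.aliyuncs.com/google_containers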
Save the last part of the output; it is what the other nodes must run to join the Kubernetes cluster.
To start using your cluster, you need to run the following as a regular user:
mkdir -p $HOME/.kube
sudo cp -i /etc/kubernetes/admin.conf $HOME/.kube/config
sudo chown $(id -u):$(id -g) $HOME/.kube/config
You should now deploy a pod network to the cluster.
Run "kubectl apply -f [podnetwork].yaml" with one of the options listed at:
https://kubernetes.io/docs/concepts/cluster-administration/addons/
Then you can join any number of worker nodes by running the following on each as root:
kubeadm join 192.168.101.200:6443 --token r6poan.94obqxb533or2guy \
--discovery-token-ca-cert-hash sha256:1a53b9795f16d3ac1ca4542a56d0e68aef6ca4a1155c969bc05acdab16963515
Set up kubectl as the output instructs
[root@master ~]# mkdir -p $HOME/.kube
[root@master ~]# sudo cp -i /etc/kubernetes/admin.conf $HOME/.kube/config
[root@master ~]# sudo chown $(id -u):$(id -g) $HOME/.kube/config
Run the following so kubectl gets bash tab completion
# echo "source <(kubectl completion bash)" >> ~/.bashrc查看节点,pod
Check the nodes and pods
[root@master ~]# kubectl get node
NAME STATUS ROLES AGE VERSION
master01.paas.com NotReady master 2m29s v1.18.0
[root@master ~]# kubectl get pod --all-namespaces
NAMESPACE NAME READY STATUS RESTARTS AGE
kube-system coredns-7ff77c879f-fsj9l 0/1 Pending 0 2m12s
kube-system coredns-7ff77c879f-q5ll2 0/1 Pending 0 2m12s
kube-system etcd-master01.paas.com 1/1 Running 0 2m22s
kube-system kube-apiserver-master01.paas.com 1/1 Running 0 2m22s
kube-system kube-controller-manager-master01.paas.com 1/1 Running 0 2m22s
kube-system kube-proxy-th472 1/1 Running 0 2m12s
kube-system kube-scheduler-master01.paas.com 1/1 Running 0 2m22s
The node is NotReady because the coredns pods cannot start yet: the cluster still lacks a network pod, as the init output pointed out:
You should now deploy a pod network to the cluster.
Run "kubectl apply -f [podnetwork].yaml" with one of the options listed at:
https://kubernetes.io/docs/concepts/cluster-administration/addons/
Install the calico network
Run on the master
[root@master ~]# kubectl apply -f https://docs.projectcalico.org/manifests/calico.yaml
configmap/calico-config created
customresourcedefinition.apiextensions.k8s.io/bgpconfigurations.crd.projectcalico.org created
customresourcedefinition.apiextensions.k8s.io/bgppeers.crd.projectcalico.org created
customresourcedefinition.apiextensions.k8s.io/blockaffinities.crd.projectcalico.org created
customresourcedefinition.apiextensions.k8s.io/clusterinformations.crd.projectcalico.org created
customresourcedefinition.apiextensions.k8s.io/felixconfigurations.crd.projectcalico.org created
customresourcedefinition.apiextensions.k8s.io/globalnetworkpolicies.crd.projectcalico.org created
customresourcedefinition.apiextensions.k8s.io/globalnetworksets.crd.projectcalico.org created
customresourcedefinition.apiextensions.k8s.io/hostendpoints.crd.projectcalico.org created
customresourcedefinition.apiextensions.k8s.io/ipamblocks.crd.projectcalico.org created
customresourcedefinition.apiextensions.k8s.io/ipamconfigs.crd.projectcalico.org created
customresourcedefinition.apiextensions.k8s.io/ipamhandles.crd.projectcalico.org created
customresourcedefinition.apiextensions.k8s.io/ippools.crd.projectcalico.org created
customresourcedefinition.apiextensions.k8s.io/networkpolicies.crd.projectcalico.org created
customresourcedefinition.apiextensions.k8s.io/networksets.crd.projectcalico.org created
clusterrole.rbac.authorization.k8s.io/calico-kube-controllers created
clusterrolebinding.rbac.authorization.k8s.io/calico-kube-controllers created
clusterrole.rbac.authorization.k8s.io/calico-node created
clusterrolebinding.rbac.authorization.k8s.io/calico-node created
daemonset.apps/calico-node created
serviceaccount/calico-node created
deployment.apps/calico-kube-controllers created
serviceaccount/calico-kube-controllers created
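The calico-node image pull can take a few minutes; a simple way to watch the rollout until everything is Running (plain kubectl, nothing cluster-specific):
[root@master ~]# kubectl get pod --all-namespaces -w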
Check the pods and nodes
[root@master ~]# kubectl get pod --all-namespaces
NAMESPACE NAME READY STATUS RESTARTS AGE
kube-system calico-kube-controllers-555fc8cc5c-k8rbk 1/1 Running 0 36s
kube-system calico-node-5km27 1/1 Running 0 36s
kube-system coredns-7ff77c879f-fsj9l 1/1 Running 0 5m22s
kube-system coredns-7ff77c879f-q5ll2 1/1 Running 0 5m22s
kube-system etcd-master01.paas.com 1/1 Running 0 5m32s
kube-system kube-apiserver-master01.paas.com 1/1 Running 0 5m32s
kube-system kube-controller-manager-master01.paas.com 1/1 Running 0 5m32s
kube-system kube-proxy-th472 1/1 Running 0 5m22s
kube-system kube-scheduler-master01.paas.com 1/1 Running 0 5m32s
[root@master ~]# kubectl get node
NAME STATUS ROLES AGE VERSION
master Ready master 5m47s v1.18.0
The cluster is now in a healthy state.
Install kubernetes-dashboard
Download the dashboard yaml manifest
# wget https://raw.githubusercontent.com/kubernetes/dashboard/v2.0.0/aio/deploy/recommended.yaml
Change the kubernetes-dashboard Service to type NodePort so the Dashboard can be reached through a node port
[root@master dashboard]# vim recommended.yaml
...
kind: Service
apiVersion: v1
metadata:
  labels:
    k8s-app: kubernetes-dashboard
  name: kubernetes-dashboard
  namespace: kubernetes-dashboard
spec:
  type: NodePort        # added
  ports:
    - port: 443
      targetPort: 8443
      nodePort: 30443   # added
  selector:
    k8s-app: kubernetes-dashboard
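If you would rather not edit the manifest by hand, the same change can be made after the install below with kubectl patch; note that the nodePort is then auto-assigned from the NodePort range unless you also patch one in:
[root@master dashboard]# kubectl -n kubernetes-dashboard patch svc kubernetes-dashboard -p '{"spec":{"type":"NodePort"}}'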
Install the Dashboard
[root@master dashboard]# kubectl create -f recommended.yaml
namespace/kubernetes-dashboard created
serviceaccount/kubernetes-dashboard created
service/kubernetes-dashboard created
secret/kubernetes-dashboard-certs created
secret/kubernetes-dashboard-csrf created
secret/kubernetes-dashboard-key-holder created
configmap/kubernetes-dashboard-settings created
role.rbac.authorization.k8s.io/kubernetes-dashboard created
clusterrole.rbac.authorization.k8s.io/kubernetes-dashboard created
rolebinding.rbac.authorization.k8s.io/kubernetes-dashboard created
clusterrolebinding.rbac.authorization.k8s.io/kubernetes-dashboard created
deployment.apps/kubernetes-dashboard created
service/dashboard-metrics-scraper created
deployment.apps/dashboard-metrics-scraper created
Confirm the state of the Dashboard pods and services
[root@master dashboard]# kubectl get pod,svc -n kubernetes-dashboard
NAME READY STATUS RESTARTS AGE
pod/dashboard-metrics-scraper-c79c65bb7-bpnbq 1/1 Running 0 2m52s
pod/kubernetes-dashboard-56484d4c5-cthdm 1/1 Running 0 2m52s
NAME TYPE CLUSTER-IP EXTERNAL-IP PORT(S) AGE
service/dashboard-metrics-scraper ClusterIP 10.105.74.63 <none> 8000/TCP 2m52s
service/kubernetes-dashboard NodePort 10.98.84.244 <none> 443:30443/TCP 2m52s
Create the ServiceAccount and ClusterRoleBinding YAML files
The Dashboard ships with minimal RBAC permissions by default; add cluster-admin rights so cluster resources can be managed from the Dashboard.
[root@master dashboard]# vim adminuser.yaml
---
apiVersion: v1
kind: ServiceAccount
metadata:
  name: admin-user
  namespace: kubernetes-dashboard
---
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRoleBinding
metadata:
  name: admin-user
roleRef:
  apiGroup: rbac.authorization.k8s.io
  kind: ClusterRole
  name: cluster-admin
subjects:
- kind: ServiceAccount
  name: admin-user
  namespace: kubernetes-dashboard
Create admin-user and bind it to the cluster-admin role
[root@master dashboard]# kubectl create -f adminuser.yaml
serviceaccount/admin-user created
clusterrolebinding.rbac.authorization.k8s.io/admin-user created
Access the Dashboard UI from a browser
1. Browse to https://<node-IP>:30443
2. Get the token
Retrieve the token used to log in to the Dashboard UI:
[root@master dashboard]# kubectl -n kubernetes-dashboard describe secret $(kubectl -n kubernetes-dashboard get secret | grep admin-user | awk '{print $1}')
Name: admin-user-token-k4gdg
Namespace: kubernetes-dashboard
Labels: <none>
Annotations: kubernetes.io/service-account.name: admin-user
kubernetes.io/service-account.uid: d116f560-15a2-45ca-930f-40f4fc12ce44
Type: kubernetes.io/service-account-token
Data
====
ca.crt: 1025 bytes
namespace: 20 bytes
token: eyJhbGciOiJSUzI1NiIsImtpZCI6IlNEa2dTVGZhM09xd0MyNWtqaGFoZEc5R0NuYnVsZ0FfVlJQODNaQUFhZjgifQ.eyJpc3MiOiJrdWJlcm5ldGVzL3NlcnZpY2VhY2NvdW50Iiwia3ViZXJuZXRlcy5pby9zZXJ2aWNlYWNjb3VudC9uYW1lc3BhY2UiOiJrdWJlcm5ldGVzLWRhc2hib2FyZCIsImt1YmVybmV0ZXMuaW8vc2VydmljZWFjY291bnQvc2VjcmV0Lm5hbWUiOiJhZG1pbi11c2VyLXRva2VuLWs0Z2RnIiwia3ViZXJuZXRlcy5pby9zZXJ2aWNlYWNjb3VudC9zZXJ2aWNlLWFjY291bnQubmFtZSI6ImFkbWluLXVzZXIiLCJrdWJlcm5ldGVzLmlvL3NlcnZpY2VhY2NvdW50L3NlcnZpY2UtYWNjb3VudC51aWQiOiJkMTE2ZjU2MC0xNWEyLTQ1Y2EtOTMwZi00MGY0ZmMxMmNlNDQiLCJzdWIiOiJzeXN0ZW06c2VydmljZWFjY291bnQ6a3ViZXJuZXRlcy1kYXNoYm9hcmQ6YWRtaW4tdXNlciJ9.qn98x11n4rPUGkDBU6ceImElgeVbM-b2SeXeeiUEm0rj41_vWXzlpd-r1Z1leuRHuveYnLpquR3QhMlFdjxLAIVAQ83KnDNhHyXYY08ZFeoGqGqlOWIAI-OCS9_IhClIskmmqYwA0kQ5AkHWbEsCKEMiYL-dZH7ECPziV0icFfBIYa6zK8-RLUBHR56rvzgjcap1WeTPdu84vr1jl8a4ZLMrzdwW_WmC4rsesA67DH6cQLgoKZRejGf6Sp4h7izO3DEwcGCUrNbg8biDRoqJwzusKoM7IJbC_C14Omg1kGrozFrMufHs8n7ujjpyuLeUyGjseX9eazlnyNkAwY0XIw
3. Log in to the Dashboard UI
Enter the token obtained in step 2 and click the Sign in button.
The Dashboard overview screen then appears.
Join the worker nodes
On each node, run the join command produced by the init earlier
# kubeadm join 192.168.101.200:6443 --token r6poan.94obqxb533or2guy \
--discovery-token-ca-cert-hash sha256:1a53b9795f16d3ac1ca4542a56d0e68aef6ca4a1155c969bc05acdab16963515
# Output:
This node has joined the cluster:
* Certificate signing request was sent to apiserver and a response was received.
* The Kubelet was informed of the new secure connection details.
Run 'kubectl get nodes' on the control-plane to see this node join the cluster.
Check from the master
[root@master ~]# kubectl get nodes
NAME STATUS ROLES AGE VERSION
master Ready master 21h v1.18.5
node01 Ready <none> 16h v1.18.5
node02 Ready <none> 16h v1.18.5
Check from a worker node
[root@node01 ~]# kubectl get nodes
Unable to connect to the server: x509: certificate signed by unknown authority (possibly because of "crypto/rsa: verification error" while trying to verify candidate authority certificate "kubernetes")
# If this error appears, copy admin.conf over from the master
# Run on the master:
scp /etc/kubernetes/admin.conf root@node01:/etc/kubernetes/
scp /etc/kubernetes/admin.conf root@node02:/etc/kubernetes/
# Then run these three steps on the node:
mkdir -p $HOME/.kube
sudo cp -i /etc/kubernetes/admin.conf $HOME/.kube/config
sudo chown $(id -u):$(id -g) $HOME/.kube/config
Check again from the node
[root@node02 ~]# kubectl get nodes
NAME STATUS ROLES AGE VERSION
master Ready master 21h v1.18.5
node01 Ready <none> 16h v1.18.5
node02 Ready <none> 16h v1.18.5
The newly added nodes show <none> under ROLES; add the worker role manually
[root@master ~]# kubectl label node node01 node-role.kubernetes.io/worker=worker
node/node01 labeled
[root@master ~]# kubectl label node node02 node-role.kubernetes.io/worker=worker
node/node02 labeled
[root@master ~]# kubectl get nodes
NAME STATUS ROLES AGE VERSION
master Ready master 21h v1.18.5
node01 Ready worker 16h v1.18.5
node02 Ready worker 16h v1.18.5
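The ROLES column here is just that label; should you ever need to remove it, kubectl's trailing-dash form deletes a label:
[root@master ~]# kubectl label node node01 node-role.kubernetes.io/worker-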