1.前期准备
# 关闭防火墙
systemctl stop firewalld
systemctl disable firewalld
systemctl status firewalld
# 关闭 selinux
sed -i 's/^SELINUX=enforcing$/SELINUX=disabled/' /etc/selinux/config #永久(只改 SELINUX= 这一行,避免误改注释)
setenforce 0 # 临时
# 关闭 swap
swapoff -a #临时
sed -ri 's/.*swap.*/#&/' /etc/fstab #永久
swapon --show # 检查,输出为空表示swap已关闭
# 加载 ip_vs 模块
for i in $(ls /usr/lib/modules/$(uname -r)/kernel/net/netfilter/ipvs|grep -o "^[^.]*");do echo $i; /sbin/modinfo -F filename $i >/dev/null 2>&1 && /sbin/modprobe $i;done
# 修改主机名信息(以下三条命令分别在对应的节点上执行)
hostnamectl set-hostname k8smaster
hostnamectl set-hostname k8snode1
hostnamectl set-hostname k8snode2
hostnamectl status
# 配置host
vim /etc/hosts
192.168.0.7 k8smaster
192.168.0.8 k8snode1
192.168.0.9 k8snode2
# 配置网络转发
cat > /etc/sysctl.d/k8s.conf << EOF
#开启网桥模式,可将网桥的流量传递给iptables链
net.bridge.bridge-nf-call-ip6tables=1
net.bridge.bridge-nf-call-iptables=1
#关闭ipv6协议,如果支持ipv6可不需要关闭
net.ipv6.conf.all.disable_ipv6=1
net.ipv4.ip_forward=1
EOF
#生效参数
sysctl --system
# 时间同步
yum install ntpdate -y
ntpdate ntp.aliyun.com
# 先把多台内网服务器的 免密打通
master节点
# 生成公钥和私钥 有提示一直回车即可
ssh-keygen -t rsa
# 得到 如果~/.ssh目录一下已经有就不用再生成
id_rsa id_rsa.pub
# 发送id_rsa.pub 到node节点服务器上
ssh-copy-id root@xx.xx.xx.xx
# 输入密码即可实现免密
2.安装docker
3.设置k8s源
/etc/yum.repos.d/kubernetes.repo
cat > /etc/yum.repos.d/kubernetes.repo << EOF
[kubernetes]
name=Kubernetes
baseurl=https://mirrors.aliyun.com/kubernetes/yum/repos/kubernetes-el7-x86_64
enabled=1
gpgcheck=0
repo_gpgcheck=0
gpgkey=https://mirrors.aliyun.com/kubernetes/yum/doc/yum-key.gpg https://mirrors.aliyun.com/kubernetes/yum/doc/rpm-package-key.gpg
EOF
4.安装所需组件 kubelet kubeadm kubectl
master 和 node 服务器都安装
yum install -y kubelet-1.18.0 kubeadm-1.18.0 kubectl-1.18.0
5.开机启动
systemctl enable kubelet.service
6.初始化与启动 kubeadm init
方式1 直接启动
注意 ip要使用内网ip
kubeadm init \
--apiserver-advertise-address=192.168.0.7 \
--image-repository registry.aliyuncs.com/google_containers \
--kubernetes-version=v1.18.0 \
--service-cidr=10.96.0.0/16 \
--pod-network-cidr=10.244.0.0/16 \
--token-ttl=0
方法2 命令生成
如需开启 ipvs,需修改 kube-proxy 的 configmap:执行 kubectl edit cm kube-proxy -n kube-system,将 mode 字段改为 ipvs
kubeadm config print init-defaults > /opt/kubeadm-config.yaml
cd /opt/
vim kubeadm-config.yaml
......
11 localAPIEndpoint:
12 advertiseAddress: 192.168.0.7 #指定master节点的IP地址
13 bindPort: 6443
......
34 kubernetesVersion: v1.18.0 #指定kubernetes版本号,必须与已安装的 kubeadm/kubelet 版本(1.18.0)一致,否则 init 会失败
35 networking:
36 dnsDomain: cluster.local
37 podSubnet: "10.244.0.0/16" #指定pod网段,10.244.0.0/16用于匹配flannel默认网段
38 serviceSubnet: 10.96.0.0/16 #指定service网段
39 scheduler: {}
#末尾再添加以下内容
---
apiVersion: kubeproxy.config.k8s.io/v1alpha1
kind: KubeProxyConfiguration
mode: ipvs #把默认的kube-proxy调度方式改为ipvs模式
kubeadm init --config=kubeadm-config.yaml --upload-certs | tee kubeadm-init.log
#--upload-certs 参数可以在后续执行加入节点时自动分发证书文件
#tee kubeadm-init.log 用以输出日志
#查看 kubeadm-init 日志
less kubeadm-init.log
#kubernetes配置文件目录
ls /etc/kubernetes/
#存放ca等证书和密码的目录
ls /etc/kubernetes/pki
7.启动后 配置
#控制台输出
mkdir -p $HOME/.kube
sudo cp -i /etc/kubernetes/admin.conf $HOME/.kube/config
sudo chown $(id -u):$(id -g) $HOME/.kube/config
#复制在master 终端执行,配置全局参数
并将输出的 kubeadm join xxxx 命令复制到各 node 节点上执行,即可加入集群
8.安装 网络插件flannel
master 上直接执行 kubectl apply -f https://raw.githubusercontent.com/coreos/flannel/master/Documentation/kube-flannel.yml
9.测试
# 检查master 与node 是否互联正常
kubectl get nodes # 查看当前的节点
kubectl get cs # 检查健康
# 测试 pod 资源创建
kubectl create deployment nginx --image=nginx
kubectl get pods -o wide
# 暴露端口提供服务
kubectl expose deployment nginx --port=80 --type=NodePort
kubectl get svc
# 扩展2个副本
kubectl scale deployment nginx --replicas=2
kubectl get pods -o wide
注意
# 如果启动提示
To see the stack trace of this error execute with --v=5 or higher
# 应该是本地的版本过高,先删除后再安装
kubectl version # 查看版本
yum remove -y kubelet kubeadm kubectl
yum install -y kubelet-1.18.0 kubeadm-1.18.0 kubectl-1.18.0
# 初始化失败时的重置操作(清理后可重新 kubeadm init)
kubeadm reset -f
rm -rf ~/.kube
rm -rf ~/.kube/ /etc/kubernetes/* /var/lib/etcd/*
# 重新生成token
kubeadm token create --print-join-command
10.部署图形后台 管理
# 创建空间
kubectl create ns kubernetes-dashboard
# 下载与配置 dashboard
wget https://raw.githubusercontent.com/kubernetes/dashboard/v2.0.0-beta8/aio/deploy/recommended.yaml
kubectl apply -f recommended.yaml
# 编辑配置
vi recommended.yaml
# 默认Dashboard只能集群内部访问,修改Service为NodePort类型,暴露到外部:
kind: Service
apiVersion: v1
metadata:
labels:
k8s-app: kubernetes-dashboard
name: kubernetes-dashboard
namespace: kubernetes-dashboard
spec:
ports:
- port: 443
targetPort: 8443
nodePort: 30002 #添加
type: NodePort #添加
selector:
k8s-app: kubernetes-dashboard
# 启动服务
kubectl apply -f recommended.yaml
# 忽略检验
kubectl apply -f recommended.yaml --validate=false
# 查看运行情况
kubectl get pods -n kubernetes-dashboard
kubectl get pods,svc -n kubernetes-dashboard
kubectl get svc
创建方式1 命令创建
# 创建service account并绑定默认cluster-admin管理员集群角色
kubectl create serviceaccount dashboard-admin -n kube-system
kubectl create clusterrolebinding dashboard-admin --clusterrole=cluster-admin --serviceaccount=kube-system:dashboard-admin
## 获取秘钥
kubectl describe secrets -n kube-system $(kubectl -n kube-system get secret | awk '/dashboard-admin/{print $1}')
创建方式2 使用yml
service-account.yaml
apiVersion: v1
kind: ServiceAccount
metadata:
name: admin-user
namespace: kubernetes-dashboard
---
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRoleBinding
metadata:
name: admin-user
roleRef:
apiGroup: rbac.authorization.k8s.io
kind: ClusterRole
name: cluster-admin
subjects:
- kind: ServiceAccount
name: admin-user
namespace: kubernetes-dashboard
## 启动
kubectl apply -f service-account.yaml
## 查看登录的token
kubectl -n kubernetes-dashboard describe secret $(kubectl -n kubernetes-dashboard get secret | grep admin-user | awk '{print $1}')
测试
使用输出的token登录Dashboard ,使用firefox打开,谷歌有限制