一、配置运行环境
# Kernel parameters required by Kubernetes networking: bridged traffic must
# traverse iptables, and IP forwarding must be on. Each key is written once
# (the original appended net.bridge.bridge-nf-call-iptables twice).
cat >>/etc/sysctl.conf <<EOF
net.bridge.bridge-nf-call-iptables = 1
net.bridge.bridge-nf-call-ip6tables = 1
net.ipv4.ip_forward = 1
vm.swappiness = 0
EOF
# br_netfilter must be loaded or the bridge-nf-call-* keys do not exist yet
# and 'sysctl -p' errors out.
modprobe br_netfilter
sysctl -p
# Disable the firewall and SELinux (kubeadm preflight checks expect this);
# the SELinux change takes full effect after a reboot.
systemctl stop firewalld
systemctl disable firewalld
sed -i 's/=enforcing/=disabled/g' /etc/selinux/config
# Persist and load the kernel modules needed for kube-proxy IPVS mode.
cat > /etc/sysconfig/modules/ipvs.modules <<EOF
#!/bin/bash
modprobe -- ip_vs
modprobe -- ip_vs_rr
modprobe -- ip_vs_wrr
modprobe -- ip_vs_sh
# nf_conntrack_ipv4 was merged into nf_conntrack in kernel >= 4.19;
# try the old name first and fall back to the new one.
modprobe -- nf_conntrack_ipv4 || modprobe -- nf_conntrack
EOF
chmod 755 /etc/sysconfig/modules/ipvs.modules
bash /etc/sysconfig/modules/ipvs.modules
# Verify the modules are loaded (nf_conntrack matches both module names).
lsmod | grep -e ip_vs -e nf_conntrack
cd /etc/yum.repos.d/
# Docker CE repo from the Aliyun mirror (also provides containerd packages).
wget https://mirrors.aliyun.com/docker-ce/linux/centos/docker-ce.repo
# Kubernetes repo. Use '>' (not '>>') so re-running this does not duplicate
# the section; the original also misspelled the filename ("kubrenetes").
cat > /etc/yum.repos.d/kubernetes.repo <<EOF
[kubernetes]
name=Kubernetes Repo
baseurl=https://mirrors.aliyun.com/kubernetes/yum/repos/kubernetes-el7-x86_64/
gpgcheck=0
gpgkey=https://mirrors.aliyun.com/kubernetes/yum/doc/yum-key.gpg
EOF
yum makecache
yum install -y containerd
# Load the kernel modules containerd/Kubernetes networking depends on,
# and persist them across reboots.
cat <<EOF | sudo tee /etc/modules-load.d/containerd.conf
overlay
br_netfilter
EOF
sudo modprobe overlay
sudo modprobe br_netfilter
# Generate the default config; make sure the target directory exists
# (a fresh install may not have created /etc/containerd yet).
sudo mkdir -p /etc/containerd
containerd config default | sudo tee /etc/containerd/config.toml
# Point the pause/sandbox image at the Aliyun mirror (registry.k8s.io is
# unreachable from mainland China) and switch to systemd cgroups, which
# kubelet's systemd cgroup driver requires.
sed -ri 's#(sandbox_image = ")(registry.k8s.io)(.*)#\1registry.cn-hangzhou.aliyuncs.com/google_containers\3#g' /etc/containerd/config.toml
sed -i "s/SystemdCgroup = false/SystemdCgroup = true/g" /etc/containerd/config.toml
# Confirm both edits took effect (grep -E replaces the deprecated
# 'cat | egrep' pipeline).
grep -E "sandbox_image|SystemdCgroup" /etc/containerd/config.toml
# Point crictl at containerd's socket so the CLI works out of the box.
cat > /etc/crictl.yaml <<EOF
runtime-endpoint: unix:///var/run/containerd/containerd.sock
image-endpoint: unix:///var/run/containerd/containerd.sock
timeout: 0
debug: false
pull-image-on-create: false
EOF
# Start containerd now and on every boot.
systemctl enable --now containerd.service
# Pin kubelet/kubeadm/kubectl to the same minor version as the cluster,
# plus the IPVS userspace tools kube-proxy's IPVS mode relies on.
yum -y install kubelet-1.24.9 kubeadm-1.24.9 kubectl-1.24.9
yum -y install ipvsadm ipset
# kubelet crash-loops until 'kubeadm init'/'join' runs; that is expected.
systemctl enable kubelet && systemctl start kubelet
二、master服务器kubeadm init
# Bootstrap the control plane. Pod CIDR 10.244.0.0/16 matches the CNI
# manifest applied below; images come from the Aliyun mirror.
kubeadm init \
  --kubernetes-version=v1.24.9 \
  --pod-network-cidr=10.244.0.0/16 \
  --service-cidr=10.96.0.0/12 \
  --image-repository='registry.cn-hangzhou.aliyuncs.com/google_containers'
三、安装calico网络
# Download and apply the Calico v3.25 CNI manifest; nodes stay NotReady and
# pods stay Pending until a CNI plugin is installed.
wget https://projectcalico.docs.tigera.io/archive/v3.25/manifests/calico.yaml
kubectl apply -f calico.yaml
四、配置NFS默认存储
1、nfs服务器安装nfs与配置
# NFS server: install, start now, and enable across reboots (the original
# only started the services, so the export disappeared after a reboot).
yum -y install nfs-utils rpcbind
systemctl enable --now rpcbind.service
systemctl enable --now nfs
mkdir /data/nfs -p
# 'user:group' — the historical 'user.group' chown separator is deprecated.
chown nfsnobody:nfsnobody /data/nfs
# First global IPv4 address on eth0 in CIDR form (e.g. 172.16.208.176/24).
# 'ip -4 -o' gives one record per line, so this does not depend on the exact
# line layout the way the original 'awk NR==3' parse did.
# NOTE(review): assumes the interface is named eth0 — confirm on each host.
NFS_SERVER_IP=$(ip -4 -o addr show eth0 | awk 'NR==1{print $4}')
cat >>/etc/exports <<EOF
/data/nfs ${NFS_SERVER_IP}(rw,sync,no_root_squash,no_all_squash)
EOF
# Re-export all directories and print the result for verification.
exportfs -arv
2、其它客户端服务器安装nfs-utils
# Every node that mounts the export needs the NFS client utilities.
yum install -y nfs-utils
3、nfs动态存储的下载及yaml配置
3.1 下载yaml配置
# Fetch the nfs-client provisioner manifests (RBAC, StorageClass, Deployment).
base_url=https://raw.githubusercontent.com/kubernetes-retired/external-storage/master/nfs-client/deploy
for manifest in rbac.yaml class.yaml deployment.yaml; do
  wget "${base_url}/${manifest}"
done
3.2 修改class.yaml，将该StorageClass设为默认存储
[root@master nfs]# cat class.yaml
apiVersion: storage.k8s.io/v1
kind: StorageClass
metadata:
  name: managed-nfs-storage
  annotations:
    # Mark this class as the cluster default so PVCs without an explicit
    # storageClassName bind to it.
    storageclass.kubernetes.io/is-default-class: "true"
# Must match the PROVISIONER_NAME env var of the provisioner Deployment.
provisioner: fuseim.pri/ifs
parameters:
  # Delete (do not archive) the backing directory when a PVC is removed.
  archiveOnDelete: "false"
3.3 导入rbac与class
# Create the default StorageClass and the provisioner's RBAC objects.
kubectl apply -f class.yaml
kubectl apply -f rbac.yaml
3.4 修改deployment文件（配置NFS服务器地址与共享路径）
[root@master ~]# cat deployment.yaml
apiVersion: apps/v1
kind: Deployment
metadata:
  name: nfs-client-provisioner
  labels:
    app: nfs-client-provisioner
  namespace: default
spec:
  replicas: 1
  strategy:
    # Recreate: never run two provisioner instances at once.
    type: Recreate
  selector:
    matchLabels:
      app: nfs-client-provisioner
  template:
    metadata:
      labels:
        app: nfs-client-provisioner
    spec:
      serviceAccountName: nfs-client-provisioner
      containers:
        - name: nfs-client-provisioner
          # Aliyun mirror of nfs-subdir-external-provisioner. The original
          # had "#修改镜像" fused directly onto the tag (no space before '#'),
          # which made the '#...' part of the image reference — invalid.
          image: registry.cn-hangzhou.aliyuncs.com/lfy_k8s_images/nfs-subdir-external-provisioner:v4.0.2
          volumeMounts:
            - name: nfs-client-root
              mountPath: /persistentvolumes
          env:
            - name: PROVISIONER_NAME
              # Must match the 'provisioner' field of the StorageClass.
              value: fuseim.pri/ifs
            - name: NFS_SERVER
              value: 172.16.208.176
            - name: NFS_PATH
              value: /data/nfs
      volumes:
        - name: nfs-client-root
          nfs:
            server: 172.16.208.176
            path: /data/nfs
3.5 测试：创建PVC验证动态供给
[root@master nfs]# cat test-claim.yaml
# Smoke-test PVC: with the default StorageClass in place it should bind
# automatically without naming a storageClassName.
kind: PersistentVolumeClaim
apiVersion: v1
metadata:
  name: test-claim
spec:
  accessModes:
    - ReadWriteMany
  resources:
    requests:
      storage: 1Mi
[root@master nfs]# kubectl apply -f test-claim.yaml
persistentvolumeclaim/test-claim created
[root@master nfs]# kubectl get pvc
NAME STATUS VOLUME CAPACITY ACCESS MODES STORAGECLASS AGE
test-claim Bound pvc-67c02ed1-0501-43f7-bfc9-f48e0b23d03f 1Mi RWX managed-nfs-storage 8s
[root@master nfs]# kubectl get pv
NAME CAPACITY ACCESS MODES RECLAIM POLICY STATUS CLAIM STORAGECLASS REASON AGE
pvc-67c02ed1-0501-43f7-bfc9-f48e0b23d03f 1Mi RWX Delete Bound default/test-claim managed-nfs-storage 11s
五、helm安装
1.查看Helm版本与kubernetes版本兼容性
https://helm.sh/docs/topics/version_skew/
2.下载地址
https://github.com/helm/helm/releases
3.下载及安装
# Download and install the helm binary.
wget https://get.helm.sh/helm-v3.12.1-linux-amd64.tar.gz
tar xf helm-v3.12.1-linux-amd64.tar.gz
mv linux-amd64/helm /usr/local/bin/
# Remove the extraction directory the original left behind.
rm -rf linux-amd64
helm version
4.添加常用的仓库
# The repository URLs were truncated in the original text; these are the
# well-known endpoints for each repository.
helm repo add stable https://charts.helm.sh/stable
helm repo add kaiyuanshe http://mirror.kaiyuanshe.cn/kubernetes/charts
helm repo add azure http://mirror.azure.cn/kubernetes/charts
helm repo add dandydev https://dandydeveloper.github.io/charts
helm repo add bitnami https://charts.bitnami.com/bitnami
helm repo list