Kubernetes Learning: Environment Setup


Install base packages

yum install wget jq psmisc vim net-tools yum-utils device-mapper-persistent-data lvm2 git lrzsz -y

Configure yum mirrors (the CentOS 8 base repo from Tencent Cloud plus the Aliyun Docker CE repo)

wget -O /etc/yum.repos.d/CentOS-Base.repo http://mirrors.cloud.tencent.com/repo/centos8_base.repo
yum clean all
yum makecache
yum install -y yum-utils device-mapper-persistent-data lvm2
yum-config-manager --add-repo https://mirrors.aliyun.com/docker-ce/linux/centos/docker-ce.repo

Configure /etc/hosts

vim /etc/hosts

Add the following entries:

192.168.56.101 master1
192.168.56.102 master2
192.168.56.103 master3

Set the DNS resolver

vim /etc/resolv.conf

Set it as follows:

nameserver 114.114.114.114

Disable the firewall

On all nodes, disable firewalld, dnsmasq, NetworkManager, and SELinux:

systemctl disable --now firewalld 
systemctl disable --now dnsmasq
setenforce 0
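
Note that setenforce 0 only disables SELinux until the next reboot; making it persistent requires editing /etc/selinux/config. (The text above also lists NetworkManager, but on CentOS 8 NetworkManager manages the network interfaces and disabling it can drop connectivity, which is presumably why no command is given for it.) A minimal sketch of the persistent change:

sed -i 's/^SELINUX=enforcing$/SELINUX=disabled/' /etc/selinux/config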



Disable the swap partition

swapoff -a && sysctl -w vm.swappiness=0
sed -ri '/^[^#]*swap/s@^@#@' /etc/fstab
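
Before continuing, you can confirm swap is fully off: swapon --show should print nothing, and free -h should report 0B of swap.

swapon --show
free -h | grep -i Swap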

Synchronize time

CentOS 7:

ln -sf /usr/share/zoneinfo/Asia/Shanghai /etc/localtime
echo 'Asia/Shanghai' >/etc/timezone
ntpdate time2.aliyun.com

CentOS 8 uses chrony instead of ntpdate:

  1. Edit the config file:
vim /etc/chrony.conf

Change the line pool 2.centos.pool.ntp.org iburst to pool ntp.aliyun.com iburst; the complete config looks like this:

# Use public servers from the pool.ntp.org project.
# Please consider joining the pool (http://www.pool.ntp.org/join.html).
#pool 2.centos.pool.ntp.org iburst
pool ntp.aliyun.com iburst
# Record the rate at which the system clock gains/losses time.
driftfile /var/lib/chrony/drift

# Allow the system clock to be stepped in the first three updates
# if its offset is larger than 1 second.
makestep 1.0 3

# Enable kernel synchronization of the real-time clock (RTC).
rtcsync

# Enable hardware timestamping on all interfaces that support it.
#hwtimestamp *

# Increase the minimum number of selectable sources required to adjust
# the system clock.
#minsources 2

# Allow NTP client access from local network.
#allow 192.168.0.0/16

# Serve time even if not synchronized to a time source.
#local stratum 10

# Specify file containing keys for NTP authentication.
keyfile /etc/chrony.keys

# Get TAI-UTC offset and leap seconds from the system tz database.
leapsectz right/UTC

# Specify directory for log files.
logdir /var/log/chrony

# Select which information is logged.
#log measurements statistics tracking


  2. Restart chronyd to reload the configuration:
systemctl restart chronyd.service

  3. Check the time sources:
chronyc sources -v

  .-- Source mode  '^' = server, '=' = peer, '#' = local clock.
 / .- Source state '*' = current best, '+' = combined, '-' = not combined,
| /             'x' = may be in error, '~' = too variable, '?' = unusable.
||                                                 .- xxxx [ yyyy ] +/- zzzz
||      Reachability register (octal) -.           |  xxxx = adjusted offset,
||      Log2(Polling interval) --.      |          |  yyyy = measured offset,
||                                \     |          |  zzzz = estimated error.
||                                 |    |           \
MS Name/IP address         Stratum Poll Reach LastRx Last sample               
===============================================================================
^* 203.107.6.88                  2   6     7     0    -30us[ +289us] +/-   26ms


  4. Check the time:
date
Sun Mar 13 15:44:31 CST 2022

Passwordless SSH from master1 to the other nodes

ssh-keygen -t rsa
for i in master1 master2 master3; do ssh-copy-id -i .ssh/id_rsa.pub $i; done
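
If you are scripting this step across nodes, a non-interactive variant of the key generation (an equivalent form, not from the original) is:

# Generate an RSA key pair with an empty passphrase and no prompts
ssh-keygen -t rsa -N '' -f ~/.ssh/id_rsa -q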

Kernel upgrade

Import the ELRepo repository's public key:

rpm --import https://www.elrepo.org/RPM-GPG-KEY-elrepo.org

Install the ELRepo yum repository:

yum install https://www.elrepo.org/elrepo-release-8.el8.elrepo.noarch.rpm

List the available kernel packages:

yum --disablerepo="*" --enablerepo="elrepo-kernel" list available

Install the latest mainline kernel:

yum --enablerepo=elrepo-kernel install kernel-ml

Set the new kernel as the default boot entry
Index 0 is the most recently installed kernel, so setting the default to 0 boots the new version:

grub2-set-default 0
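
You can confirm which kernel GRUB will now boot by default with grubby, which ships with CentOS 8:

grubby --default-kernel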

Regenerate the GRUB config and reboot:

grub2-mkconfig -o /boot/grub2/grub.cfg
reboot

Verify the new kernel:

uname -r

List the kernels installed on the system:

rpm -qa | grep kernel

Remove the old kernel:

yum remove kernel-core-4.18.0 kernel-devel-4.18.0 kernel-tools-libs-4.18.0 kernel-headers-4.18.0

Check the installed kernels again:

rpm -qa | grep kernel

Install IPVS

yum install ipvsadm ipset sysstat conntrack libseccomp -y

Configure the modules to load. Note: on kernels 4.18 and earlier use nf_conntrack_ipv4; on 4.19 and later use nf_conntrack:

vim /etc/modules-load.d/ipvs.conf 
ip_vs
ip_vs_lc
ip_vs_wlc
ip_vs_rr
ip_vs_wrr
ip_vs_lblc
ip_vs_lblcr
ip_vs_dh
ip_vs_sh
ip_vs_fo
ip_vs_nq
ip_vs_sed
ip_vs_ftp
nf_conntrack
ip_tables
ip_set
xt_set
ipt_set
ipt_rpfilter
ipt_REJECT
ipip

Reload the configuration:

systemctl enable --now systemd-modules-load.service

Configure the Kubernetes kernel parameters on all nodes

cat <<EOF > /etc/sysctl.d/k8s.conf
net.ipv4.ip_forward = 1
net.bridge.bridge-nf-call-iptables = 1
net.bridge.bridge-nf-call-ip6tables = 1
fs.may_detach_mounts = 1
net.ipv4.conf.all.route_localnet = 1
vm.overcommit_memory=1
vm.panic_on_oom=0
fs.inotify.max_user_watches=89100
fs.file-max=52706963
fs.nr_open=52706963
net.netfilter.nf_conntrack_max=2310720

net.ipv4.tcp_keepalive_time = 600
net.ipv4.tcp_keepalive_probes = 3
net.ipv4.tcp_keepalive_intvl = 15
net.ipv4.tcp_max_tw_buckets = 36000
net.ipv4.tcp_tw_reuse = 1
net.ipv4.tcp_max_orphans = 327680
net.ipv4.tcp_orphan_retries = 3
net.ipv4.tcp_syncookies = 1
net.ipv4.tcp_max_syn_backlog = 16384
net.ipv4.ip_conntrack_max = 65536
net.ipv4.tcp_timestamps = 0
net.core.somaxconn = 16384
EOF
sysctl --system
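
To spot-check that the parameters took effect, query a couple of keys back (any of the keys above will do):

sysctl net.ipv4.ip_forward net.core.somaxconn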

Reboot, then verify the modules are loaded:

reboot
lsmod | grep --color=auto -e ip_vs -e nf_conntrack

Install base components

Install containerd

List the available versions:

yum list docker-ce --showduplicates | sort -r

yum install docker-ce-20.10.* docker-ce-cli-20.10.* containerd.io -y

Configure the modules containerd requires:

cat <<EOF | sudo tee /etc/modules-load.d/containerd.conf
overlay
br_netfilter
EOF

Load the kernel modules:

modprobe -- overlay
modprobe -- br_netfilter

Configure the kernel parameters:

cat <<EOF | sudo tee /etc/sysctl.d/99-kubernetes-cri.conf
net.bridge.bridge-nf-call-iptables  = 1
net.ipv4.ip_forward                 = 1
net.bridge.bridge-nf-call-ip6tables = 1
EOF

Apply the sysctl settings:

sysctl --system

Generate the default config file:

mkdir -p /etc/containerd
containerd config default | tee /etc/containerd/config.toml

Edit the config file:

vim /etc/containerd/config.toml

Make the following changes:

Find containerd.runtimes.runc.options and set SystemdCgroup = true
Change every sandbox_image to registry.cn-hangzhou.aliyuncs.com/google_containers/pause:3.6
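
The same two edits can also be scripted with sed. This is a sketch that assumes the stock layout of the generated config.toml (containerd 1.5/1.6 key names); check the result before restarting:

# Switch the runc runtime to the systemd cgroup driver
sed -i 's/SystemdCgroup = false/SystemdCgroup = true/' /etc/containerd/config.toml
# Point the pause/sandbox image at the Aliyun mirror
sed -i 's#sandbox_image = ".*"#sandbox_image = "registry.cn-hangzhou.aliyuncs.com/google_containers/pause:3.6"#' /etc/containerd/config.toml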

Start containerd and enable it at boot:

systemctl daemon-reload
systemctl enable --now containerd

Configure crictl to use the containerd socket:

cat > /etc/crictl.yaml <<EOF
runtime-endpoint: unix:///run/containerd/containerd.sock
image-endpoint: unix:///run/containerd/containerd.sock
timeout: 10
debug: false
EOF
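
With containerd running, you can verify it is reachable with ctr, which ships in the containerd.io package (crictl itself comes from the separate cri-tools project, so the config above takes effect once crictl is installed):

ctr --address /run/containerd/containerd.sock version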

A note on cgroup drivers: systemd vs. cgroupfs
What is the difference between these two drivers?

  1. With the systemd cgroup driver, systemd itself provides the cgroup management: every cgroup operation must go through systemd's interface, and the cgroup files cannot be changed by hand.
  2. The cgroupfs driver is more direct: to cap memory or set a CPU share, you write the pid into the appropriate cgroup file and write the limits into the corresponding memory and CPU cgroup files.

So systemd is the safer choice, since the cgroup files cannot be modified manually, and it is the recommended driver for managing cgroups.
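
Relatedly, you can check which cgroup hierarchy the host is actually running: stat prints cgroup2fs on a cgroup v2 host and tmpfs on cgroup v1 (the CentOS 8 default).

stat -fc %T /sys/fs/cgroup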

Install the K8s components

Download Kubernetes 1.23.4 (github.com/kubernetes/…):

wget https://storage.googleapis.com/kubernetes-release/release/v1.23.4/kubernetes-server-linux-amd64.tar.gz

Download etcd 3.5.1 (github.com/etcd-io/etc…):

wget https://github.com/etcd-io/etcd/releases/download/v3.5.1/etcd-v3.5.1-linux-amd64.tar.gz

Extract the Kubernetes components:

tar -xf kubernetes-server-linux-amd64.tar.gz  --strip-components=3 -C /usr/local/bin kubernetes/server/bin/kube{let,ctl,-apiserver,-controller-manager,-scheduler,-proxy}

Extract the etcd binaries:

 tar -zxvf etcd-v3.5.1-linux-amd64.tar.gz --strip-components=1 -C /usr/local/bin etcd-v3.5.1-linux-amd64/etcd{,ctl}

Copy the binaries to the other nodes:

MasterNodes='master2 master3'
for NODE in $MasterNodes; do echo $NODE; scp /usr/local/bin/kube{let,ctl,-apiserver,-controller-manager,-scheduler,-proxy} $NODE:/usr/local/bin/; scp /usr/local/bin/etcd* $NODE:/usr/local/bin/; done

Download the CNI plugins (github.com/containerne…):

wget https://github.com/containernetworking/plugins/releases/download/v1.1.1/cni-plugins-linux-amd64-v1.1.1.tgz

Create the /opt/cni/bin directory on all nodes:

mkdir -p /opt/cni/bin

Extract the CNI plugins and copy them to all nodes:

tar -zxf cni-plugins-linux-amd64-v1.1.1.tgz -C /opt/cni/bin
for NODE in $MasterNodes; do ssh $NODE 'mkdir -p /opt/cni/bin'; scp /opt/cni/bin/* $NODE:/opt/cni/bin/; done

Generate certificates

On master1, download the cfssl certificate tools (github.com/cloudflare/…):

wget https://github.com/cloudflare/cfssl/releases/download/v1.6.1/cfssl_1.6.1_linux_amd64 -O /usr/local/bin/cfssl
wget https://github.com/cloudflare/cfssl/releases/download/v1.6.1/cfssljson_1.6.1_linux_amd64 -O /usr/local/bin/cfssljson
chmod +x /usr/local/bin/*

Create the etcd certificate directory on all nodes:

mkdir -p /etc/etcd/ssl 

Create the Kubernetes PKI directory on all nodes:

mkdir -p /etc/kubernetes/pki 

Generate etcd certificates

On master1, generate the etcd certificates.
Create the self-signed certificate authority (CA) configuration:

cat > ca-config.json << EOF
{
  "signing": {
    "default": {
      "expiry": "876000h"
    },
    "profiles": {
      "kubernetes": {
        "usages": [
            "signing",
            "key encipherment",
            "server auth",
            "client auth"
        ],
        "expiry": "876000h"
      }
    }
  }
}
EOF

cat > etcd-ca-csr.json << EOF
{
  "CN": "etcd",
  "key": {
    "algo": "rsa",
    "size": 2048
  },
  "names": [
    {
      "C": "CN",
      "ST": "Beijing",
      "L": "Beijing",
      "O": "etcd",
      "OU": "Etcd Security"
    }
  ],
  "ca": {
    "expiry": "876000h"
  }
}
EOF

cat > etcd-csr.json << EOF
{
  "CN": "etcd",
  "key": {
    "algo": "rsa",
    "size": 2048
  },
  "names": [
    {
      "C": "CN",
      "ST": "Beijing",
      "L": "Beijing",
      "O": "etcd",
      "OU": "Etcd Security"
    }
  ]
}
EOF

Generate the self-signed etcd CA from the CSR (certificate signing request) file:

 cfssl gencert -initca etcd-ca-csr.json | cfssljson -bare /etc/etcd/ssl/etcd-ca

Issue the etcd certificate:

cfssl gencert \
   -ca=/etc/etcd/ssl/etcd-ca.pem \
   -ca-key=/etc/etcd/ssl/etcd-ca-key.pem \
   -config=ca-config.json \
   -hostname=127.0.0.1,master1,master2,master3,192.168.56.101,192.168.56.102,192.168.56.103 \
   -profile=kubernetes \
   etcd-csr.json | cfssljson -bare /etc/etcd/ssl/etcd
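
Before copying the certificates around, it is worth confirming the SANs: every name and IP passed via -hostname above should appear in the output.

openssl x509 -in /etc/etcd/ssl/etcd.pem -noout -text | grep -A1 'Subject Alternative Name'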

Copy the etcd certificates to the other nodes:

for NODE in $MasterNodes; do
     ssh $NODE "mkdir -p /etc/etcd/ssl"
     for FILE in etcd-ca-key.pem  etcd-ca.pem  etcd-key.pem  etcd.pem; do
       scp /etc/etcd/ssl/${FILE} $NODE:/etc/etcd/ssl/${FILE}
     done
 done

Generate Kubernetes certificates

cd /etc/kubernetes/pki

Generate the CA certificate:

cat > ca-csr.json << EOF
{
  "CN": "kubernetes",
  "key": {
    "algo": "rsa",
    "size": 2048
  },
  "names": [
    {
      "C": "CN",
      "ST": "Beijing",
      "L": "Beijing",
      "O": "Kubernetes",
      "OU": "Kubernetes-manual"
    }
  ],
  "ca": {
    "expiry": "876000h"
  }
}
EOF

cfssl gencert -initca ca-csr.json | cfssljson -bare /etc/kubernetes/pki/ca

Generate the apiserver certificate

cat > ca-config.json << EOF
{
  "signing": {
    "default": {
      "expiry": "876000h"
    },
    "profiles": {
      "kubernetes": {
        "usages": [
            "signing",
            "key encipherment",
            "server auth",
            "client auth"
        ],
        "expiry": "876000h"
      }
    }
  }
}
EOF

cat > apiserver-csr.json << EOF
{
  "CN": "kube-apiserver",
  "key": {
    "algo": "rsa",
    "size": 2048
  },
  "names": [
    {
      "C": "CN",
      "ST": "Beijing",
      "L": "Beijing",
      "O": "Kubernetes",
      "OU": "Kubernetes-manual"
    }
  ]
}
EOF

cfssl gencert \
	-ca=/etc/kubernetes/pki/ca.pem \
	-ca-key=/etc/kubernetes/pki/ca-key.pem \
	-config=ca-config.json \
	-hostname=10.96.0.1,192.168.56.88,127.0.0.1,kubernetes,kubernetes.default,kubernetes.default.svc,kubernetes.default.svc.cluster,kubernetes.default.svc.cluster.local,192.168.56.101,192.168.56.102,192.168.56.103 \
	-profile=kubernetes \
	apiserver-csr.json | cfssljson -bare /etc/kubernetes/pki/apiserver



Generate the apiserver aggregation (front-proxy) certificates

cat > front-proxy-ca-csr.json << EOF
{
  "CN": "kubernetes",
  "key": {
     "algo": "rsa",
     "size": 2048
  },
  "ca": {
    "expiry": "876000h"
  }
}
EOF

cat > front-proxy-client-csr.json  << EOF
{
  "CN": "front-proxy-client",
  "key": {
     "algo": "rsa",
     "size": 2048
  }
}
EOF

cfssl gencert -initca front-proxy-ca-csr.json | cfssljson -bare /etc/kubernetes/pki/front-proxy-ca

cfssl gencert \
	-ca=/etc/kubernetes/pki/front-proxy-ca.pem \
	-ca-key=/etc/kubernetes/pki/front-proxy-ca-key.pem \
	-config=ca-config.json \
	-profile=kubernetes \
	front-proxy-client-csr.json | cfssljson -bare /etc/kubernetes/pki/front-proxy-client


Generate the controller-manager certificate

cat > manager-csr.json << EOF
{
  "CN": "system:kube-controller-manager",
  "key": {
    "algo": "rsa",
    "size": 2048
  },
  "names": [
    {
      "C": "CN",
      "ST": "Beijing",
      "L": "Beijing",
      "O": "system:kube-controller-manager",
      "OU": "Kubernetes-manual"
    }
  ]
}
EOF

 cfssl gencert \
   -ca=/etc/kubernetes/pki/ca.pem \
   -ca-key=/etc/kubernetes/pki/ca-key.pem \
   -config=ca-config.json \
   -profile=kubernetes \
   manager-csr.json | cfssljson -bare /etc/kubernetes/pki/controller-manager

Set the cluster entry (192.168.56.88:8443 is the haproxy/keepalived VIP configured in the high-availability section below):

kubectl config set-cluster kubernetes \
     --certificate-authority=/etc/kubernetes/pki/ca.pem \
     --embed-certs=true \
     --server=https://192.168.56.88:8443 \
     --kubeconfig=/etc/kubernetes/controller-manager.kubeconfig

Set a context entry:

kubectl config set-context system:kube-controller-manager@kubernetes \
    --cluster=kubernetes \
    --user=system:kube-controller-manager \
    --kubeconfig=/etc/kubernetes/controller-manager.kubeconfig


Set a user entry (set-credentials):

kubectl config set-credentials system:kube-controller-manager \
     --client-certificate=/etc/kubernetes/pki/controller-manager.pem \
     --client-key=/etc/kubernetes/pki/controller-manager-key.pem \
     --embed-certs=true \
     --kubeconfig=/etc/kubernetes/controller-manager.kubeconfig

Use this context as the default:

kubectl config use-context system:kube-controller-manager@kubernetes \
    --kubeconfig=/etc/kubernetes/controller-manager.kubeconfig

Generate the scheduler certificate

cat > scheduler-csr.json << EOF
{
  "CN": "system:kube-scheduler",
  "key": {
    "algo": "rsa",
    "size": 2048
  },
  "names": [
    {
      "C": "CN",
      "ST": "Beijing",
      "L": "Beijing",
      "O": "system:kube-scheduler",
      "OU": "Kubernetes-manual"
    }
  ]
}
EOF

cfssl gencert \
   -ca=/etc/kubernetes/pki/ca.pem \
   -ca-key=/etc/kubernetes/pki/ca-key.pem \
   -config=ca-config.json \
   -profile=kubernetes \
   scheduler-csr.json | cfssljson -bare /etc/kubernetes/pki/scheduler

Set the cluster entry:

kubectl config set-cluster kubernetes \
     --certificate-authority=/etc/kubernetes/pki/ca.pem \
     --embed-certs=true \
     --server=https://192.168.56.88:8443 \
     --kubeconfig=/etc/kubernetes/scheduler.kubeconfig

Set a context entry:

kubectl config set-context system:kube-scheduler@kubernetes \
     --cluster=kubernetes \
     --user=system:kube-scheduler \
     --kubeconfig=/etc/kubernetes/scheduler.kubeconfig

Set a user entry (set-credentials):

kubectl config set-credentials system:kube-scheduler \
     --client-certificate=/etc/kubernetes/pki/scheduler.pem \
     --client-key=/etc/kubernetes/pki/scheduler-key.pem \
     --embed-certs=true \
     --kubeconfig=/etc/kubernetes/scheduler.kubeconfig

Use this context as the default:

kubectl config use-context system:kube-scheduler@kubernetes \
     --kubeconfig=/etc/kubernetes/scheduler.kubeconfig

Generate the admin certificate

cat > admin-csr.json << EOF
{
  "CN": "admin",
  "key": {
    "algo": "rsa",
    "size": 2048
  },
  "names": [
    {
      "C": "CN",
      "ST": "Beijing",
      "L": "Beijing",
      "O": "system:masters",
      "OU": "Kubernetes-manual"
    }
  ]
}
EOF

cfssl gencert \
   -ca=/etc/kubernetes/pki/ca.pem \
   -ca-key=/etc/kubernetes/pki/ca-key.pem \
   -config=ca-config.json \
   -profile=kubernetes \
   admin-csr.json | cfssljson -bare /etc/kubernetes/pki/admin

Set the cluster entry:

kubectl config set-cluster kubernetes \
	--certificate-authority=/etc/kubernetes/pki/ca.pem \
	--embed-certs=true \
	--server=https://192.168.56.88:8443 \
	--kubeconfig=/etc/kubernetes/admin.kubeconfig


Set a context entry:

kubectl config set-context kubernetes-admin@kubernetes \
	--cluster=kubernetes \
	--user=kubernetes-admin \
	--kubeconfig=/etc/kubernetes/admin.kubeconfig


Set a user entry (set-credentials):

kubectl config set-credentials kubernetes-admin \
	--client-certificate=/etc/kubernetes/pki/admin.pem \
	--client-key=/etc/kubernetes/pki/admin-key.pem \
	--embed-certs=true \
	--kubeconfig=/etc/kubernetes/admin.kubeconfig

Use this context as the default:

kubectl config use-context kubernetes-admin@kubernetes \
	--kubeconfig=/etc/kubernetes/admin.kubeconfig

Generate the ServiceAccount key pair

openssl genrsa -out /etc/kubernetes/pki/sa.key 2048

openssl rsa -in /etc/kubernetes/pki/sa.key -pubout -out /etc/kubernetes/pki/sa.pub
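
For context, this key pair is consumed later by the control-plane unit files (not shown in this excerpt): kube-apiserver verifies ServiceAccount tokens with the public key and signs them with the private key, and kube-controller-manager signs tokens with the private key. The relevant flags look roughly like this:

# kube-apiserver
#   --service-account-key-file=/etc/kubernetes/pki/sa.pub
#   --service-account-signing-key-file=/etc/kubernetes/pki/sa.key
# kube-controller-manager
#   --service-account-private-key-file=/etc/kubernetes/pki/sa.key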

Copy the certificates and kubeconfigs to the other nodes:

for NODE in master2 master3; do 
for FILE in $(ls /etc/kubernetes/pki | grep -v etcd); do 
scp /etc/kubernetes/pki/${FILE} $NODE:/etc/kubernetes/pki/${FILE};
done; 
for FILE in admin.kubeconfig controller-manager.kubeconfig scheduler.kubeconfig; do 
scp /etc/kubernetes/${FILE} $NODE:/etc/kubernetes/${FILE};
done;
done

Configure the Kubernetes components

etcd configuration

master1

cat > /etc/etcd/etcd.config.yml << EOF
name: 'master1'
data-dir: /var/lib/etcd
wal-dir: /var/lib/etcd/wal
snapshot-count: 5000
heartbeat-interval: 100
election-timeout: 1000
quota-backend-bytes: 0
listen-peer-urls: 'https://192.168.56.101:2380'
listen-client-urls: 'https://192.168.56.101:2379,http://127.0.0.1:2379'
max-snapshots: 3
max-wals: 5
cors:
initial-advertise-peer-urls: 'https://192.168.56.101:2380'
advertise-client-urls: 'https://192.168.56.101:2379'
discovery:
discovery-fallback: 'proxy'
discovery-proxy:
discovery-srv:
initial-cluster: 'master1=https://192.168.56.101:2380,master2=https://192.168.56.102:2380,master3=https://192.168.56.103:2380'
initial-cluster-token: 'etcd-k8s-cluster'
initial-cluster-state: 'new'
strict-reconfig-check: false
enable-v2: true
enable-pprof: true
proxy: 'off'
proxy-failure-wait: 5000
proxy-refresh-interval: 30000
proxy-dial-timeout: 1000
proxy-write-timeout: 5000
proxy-read-timeout: 0
client-transport-security:
  ca-file: '/etc/kubernetes/pki/etcd/etcd-ca.pem'
  cert-file: '/etc/kubernetes/pki/etcd/etcd.pem'
  key-file: '/etc/kubernetes/pki/etcd/etcd-key.pem'
  client-cert-auth: true
  trusted-ca-file: '/etc/kubernetes/pki/etcd/etcd-ca.pem'
  auto-tls: true
peer-transport-security:
  ca-file: '/etc/kubernetes/pki/etcd/etcd-ca.pem'
  cert-file: '/etc/kubernetes/pki/etcd/etcd.pem'
  key-file: '/etc/kubernetes/pki/etcd/etcd-key.pem'
  peer-client-cert-auth: true
  trusted-ca-file: '/etc/kubernetes/pki/etcd/etcd-ca.pem'
  auto-tls: true
debug: false
log-package-levels:
log-output: default
force-new-cluster: false
EOF

master2

cat > /etc/etcd/etcd.config.yml << EOF
name: 'master2'
data-dir: /var/lib/etcd
wal-dir: /var/lib/etcd/wal
snapshot-count: 5000
heartbeat-interval: 100
election-timeout: 1000
quota-backend-bytes: 0
listen-peer-urls: 'https://192.168.56.102:2380'
listen-client-urls: 'https://192.168.56.102:2379,http://127.0.0.1:2379'
max-snapshots: 3
max-wals: 5
cors:
initial-advertise-peer-urls: 'https://192.168.56.102:2380'
advertise-client-urls: 'https://192.168.56.102:2379'
discovery:
discovery-fallback: 'proxy'
discovery-proxy:
discovery-srv:
initial-cluster: 'master1=https://192.168.56.101:2380,master2=https://192.168.56.102:2380,master3=https://192.168.56.103:2380'
initial-cluster-token: 'etcd-k8s-cluster'
initial-cluster-state: 'new'
strict-reconfig-check: false
enable-v2: true
enable-pprof: true
proxy: 'off'
proxy-failure-wait: 5000
proxy-refresh-interval: 30000
proxy-dial-timeout: 1000
proxy-write-timeout: 5000
proxy-read-timeout: 0
client-transport-security:
  ca-file: '/etc/kubernetes/pki/etcd/etcd-ca.pem'
  cert-file: '/etc/kubernetes/pki/etcd/etcd.pem'
  key-file: '/etc/kubernetes/pki/etcd/etcd-key.pem'
  client-cert-auth: true
  trusted-ca-file: '/etc/kubernetes/pki/etcd/etcd-ca.pem'
  auto-tls: true
peer-transport-security:
  ca-file: '/etc/kubernetes/pki/etcd/etcd-ca.pem'
  cert-file: '/etc/kubernetes/pki/etcd/etcd.pem'
  key-file: '/etc/kubernetes/pki/etcd/etcd-key.pem'
  peer-client-cert-auth: true
  trusted-ca-file: '/etc/kubernetes/pki/etcd/etcd-ca.pem'
  auto-tls: true
debug: false
log-package-levels:
log-output: default
force-new-cluster: false
EOF

master3

cat > /etc/etcd/etcd.config.yml << EOF
name: 'master3'
data-dir: /var/lib/etcd
wal-dir: /var/lib/etcd/wal
snapshot-count: 5000
heartbeat-interval: 100
election-timeout: 1000
quota-backend-bytes: 0
listen-peer-urls: 'https://192.168.56.103:2380'
listen-client-urls: 'https://192.168.56.103:2379,http://127.0.0.1:2379'
max-snapshots: 3
max-wals: 5
cors:
initial-advertise-peer-urls: 'https://192.168.56.103:2380'
advertise-client-urls: 'https://192.168.56.103:2379'
discovery:
discovery-fallback: 'proxy'
discovery-proxy:
discovery-srv:
initial-cluster: 'master1=https://192.168.56.101:2380,master2=https://192.168.56.102:2380,master3=https://192.168.56.103:2380'
initial-cluster-token: 'etcd-k8s-cluster'
initial-cluster-state: 'new'
strict-reconfig-check: false
enable-v2: true
enable-pprof: true
proxy: 'off'
proxy-failure-wait: 5000
proxy-refresh-interval: 30000
proxy-dial-timeout: 1000
proxy-write-timeout: 5000
proxy-read-timeout: 0
client-transport-security:
  ca-file: '/etc/kubernetes/pki/etcd/etcd-ca.pem'
  cert-file: '/etc/kubernetes/pki/etcd/etcd.pem'
  key-file: '/etc/kubernetes/pki/etcd/etcd-key.pem'
  client-cert-auth: true
  trusted-ca-file: '/etc/kubernetes/pki/etcd/etcd-ca.pem'
  auto-tls: true
peer-transport-security:
  ca-file: '/etc/kubernetes/pki/etcd/etcd-ca.pem'
  cert-file: '/etc/kubernetes/pki/etcd/etcd.pem'
  key-file: '/etc/kubernetes/pki/etcd/etcd-key.pem'
  peer-client-cert-auth: true
  trusted-ca-file: '/etc/kubernetes/pki/etcd/etcd-ca.pem'
  auto-tls: true
debug: false
log-package-levels:
log-output: default
force-new-cluster: false
EOF

Create the systemd service file:

cat > /usr/lib/systemd/system/etcd.service << EOF
[Unit]
Description=Etcd Service
Documentation=https://coreos.com/etcd/docs/latest/
After=network.target

[Service]
Type=notify
ExecStart=/usr/local/bin/etcd --config-file=/etc/etcd/etcd.config.yml
Restart=on-failure
RestartSec=10
LimitNOFILE=65536

[Install]
WantedBy=multi-user.target
Alias=etcd3.service
EOF

mkdir /etc/kubernetes/pki/etcd
ln -s /etc/etcd/ssl/* /etc/kubernetes/pki/etcd/
systemctl daemon-reload
systemctl enable --now etcd


Check the etcd cluster status:

export ETCDCTL_API=3
etcdctl --endpoints="192.168.56.101:2379,192.168.56.102:2379,192.168.56.103:2379" \
	--cacert=/etc/kubernetes/pki/etcd/etcd-ca.pem \
	--cert=/etc/kubernetes/pki/etcd/etcd.pem \
	--key=/etc/kubernetes/pki/etcd/etcd-key.pem \
	endpoint status --write-out=table
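
A quicker liveness probe against the same endpoints:

etcdctl --endpoints="192.168.56.101:2379,192.168.56.102:2379,192.168.56.103:2379" \
	--cacert=/etc/kubernetes/pki/etcd/etcd-ca.pem \
	--cert=/etc/kubernetes/pki/etcd/etcd.pem \
	--key=/etc/kubernetes/pki/etcd/etcd-key.pem \
	endpoint health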

High-availability configuration

Install keepalived and haproxy on all nodes:

yum install keepalived haproxy -y

haproxy configuration:

cat > /etc/haproxy/haproxy.cfg  << EOF
global
  maxconn  2000
  ulimit-n  16384
  log  127.0.0.1 local0 err
  stats timeout 30s

defaults
  log global
  mode  http
  option  httplog
  timeout connect 5000
  timeout client  50000
  timeout server  50000
  timeout http-request 15s
  timeout http-keep-alive 15s

frontend monitor-in
  bind *:33305
  mode http
  option httplog
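# NOTE: the original article is cut off at this point, mid-file; everything
# below this comment is a hedged reconstruction of the typical completion for
# this three-master topology (the apiserver frontend on VIP port 8443
# forwarding to each kube-apiserver on 6443). Verify it against your own
# environment before relying on it.
  monitor-uri /monitor

frontend k8s-master
  bind *:8443
  mode tcp
  option tcplog
  default_backend k8s-master

backend k8s-master
  mode tcp
  option tcp-check
  balance roundrobin
  server master1 192.168.56.101:6443 check
  server master2 192.168.56.102:6443 check
  server master3 192.168.56.103:6443 check
EOF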

