Kubernetes Binary Deployment: Installing a Cluster with kubeasz

Machine Inventory

Type         Server IP      Hostname
Harbor1      172.31.7.104   k8s-harbor1.magedu.net
Harbor2      172.31.7.105   k8s-harbor2.magedu.net
K8S Master1  172.31.7.101   k8s-master1.magedu.net
K8S Master2  172.31.7.102   k8s-master2.magedu.net
K8S Master3  172.31.7.103   k8s-master3.magedu.net
K8S Node1    172.31.7.111   k8s-node1.magedu.net
K8S Node2    172.31.7.112   k8s-node2.magedu.net
K8S Node3    172.31.7.113   k8s-node3.magedu.net
etcd01       172.31.7.106   k8s-etcd1.magedu.net
etcd02       172.31.7.107   k8s-etcd2.magedu.net
etcd03       172.31.7.108   k8s-etcd3.magedu.net

Reference: kubeasz (installs a K8S cluster with Ansible playbooks)

github.com/easzlab/kub…

Global Configuration

vim /etc/hosts

172.31.7.104 harbor01 k8s-harbor1.magedu.net harbor.linuxarchitect.io
172.31.7.105 harbor02 k8s-harbor2.magedu.net
172.31.7.101 master01 k8s-master1.magedu.net
172.31.7.102 master02 k8s-master2.magedu.net
172.31.7.103 master03 k8s-master3.magedu.net
172.31.7.111 node01 k8s-node1.magedu.net
172.31.7.112 node02 k8s-node2.magedu.net
172.31.7.113 node03 k8s-node3.magedu.net
172.31.7.106 etcd01 k8s-etcd1.magedu.net
172.31.7.107 etcd02 k8s-etcd2.magedu.net
172.31.7.108 etcd03 k8s-etcd3.magedu.net

Configure SSH mutual trust

ssh-keygen
ssh-copy-id -i .ssh/id_rsa.pub root@harbor02
ssh-copy-id -i .ssh/id_rsa.pub root@harbor01
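kubeasz drives every node over SSH from the deployment machine, so the key ultimately has to reach all hosts in the inventory. A minimal sketch, assuming the host aliases from /etc/hosts above and root password login still enabled on the targets:

# Hypothetical loop: push the public key to every cluster node
for h in master01 master02 master03 node01 node02 node03 etcd01 etcd02 etcd03; do
  ssh-copy-id -i ~/.ssh/id_rsa.pub root@${h}
done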

Configure DNS resolution

resolvectl status
resolvectl dns ens34 8.8.8.8

Configure the time service

apt install chrony -y
systemctl restart chrony
systemctl enable chrony
systemctl status chrony

Set the timezone

timedatectl set-timezone Asia/Shanghai

Install Docker

curl -fsSL http://mirrors.aliyun.com/docker-ce/linux/ubuntu/gpg | apt-key add -

apt update

apt -y install apt-transport-https ca-certificates curl software-properties-common

add-apt-repository "deb [arch=amd64] http://mirrors.aliyun.com/docker-ce/linux/ubuntu $(lsb_release -cs) stable"

apt -y install docker-ce

Install and Configure the Harbor Nodes

Operations on the Harbor nodes

Self-sign a certificate

Run on the Harbor1 primary node

mkdir -p /apps/harbor/certs; cd /apps/harbor/certs

openssl genrsa -out harbor-ca.key

Issue the CRT certificate

openssl req -x509 -new -nodes -key harbor-ca.key -subj "/CN=harbor.linuxarchitect.io" -days 7120 -out harbor-ca.crt
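To sanity-check the generated CA before wiring it into Harbor, standard openssl inspection works:

# Confirm the subject and the validity window of the self-signed certificate
openssl x509 -in harbor-ca.crt -noout -subject -dates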

Download and install Harbor

github.com/goharbor/ha…

mkdir -p /apps;cd /apps
wget https://github.com/goharbor/harbor/releases/download/v2.7.3/harbor-offline-installer-v2.7.3.tgz

tar -xf harbor-offline-installer-v2.7.3.tgz 

cd /apps/harbor;cp harbor.yml.tmpl harbor.yml

Edit the Harbor configuration file

vim /apps/harbor/harbor.yml

hostname: harbor.linuxarchitect.io
http:
  port: 80

https:
  port: 443
  certificate: /apps/harbor/certs/harbor-ca.crt
  private_key: /apps/harbor/certs/harbor-ca.key

harbor_admin_password: 123456
database: 
    password: root123 
    max_idle_conns: 100
    max_open_conns: 900
data_volume: /data

Install

cd /apps/harbor; ./install.sh --with-trivy --with-chartmuseum
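Once install.sh finishes, the Harbor services can be inspected from the compose project it creates (assuming docker-compose is installed, as Harbor's installer requires):

# All components (core, portal, registry, trivy, chartmuseum, ...) should show healthy
cd /apps/harbor && docker-compose ps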


Visit harbor.linuxarchitect.io/harbor/proj… to confirm the web UI is reachable.

Sync the Certificate to Client Nodes

Run on the Harbor2 node

Sync the self-signed certificate

mkdir /etc/docker/certs.d/harbor.linuxarchitect.io -p

scp 172.31.7.104:/apps/harbor/certs/harbor-ca.crt /etc/docker/certs.d/harbor.linuxarchitect.io

Configure insecure-registries, otherwise access will fail. Entries take a plain registry host name (no URL scheme), and Docker must be restarted after editing daemon.json:

root@harbor02:~# cat /etc/docker/daemon.json 
{"insecure-registries":["harbor.linuxarchitect.io"]}

systemctl restart docker

Test the login: docker login harbor.linuxarchitect.io

Test pushing an image with Docker (create the baseimages project in the Harbor UI first)

docker pull centos:7.9.2009

docker tag centos:7.9.2009 harbor.linuxarchitect.io/baseimages/centos:7.9.2009

docker push harbor.linuxarchitect.io/baseimages/centos:7.9.2009

The image has been pushed successfully.

Configure keepalived

apt-get install keepalived haproxy

Configure the VIP

cp /usr/share/doc/keepalived/samples/keepalived.conf.vrrp /etc/keepalived/keepalived.conf

On harbor01, the MASTER node, edit /etc/keepalived/keepalived.conf:

! Configuration File for keepalived

global_defs {
   notification_email {
     acassen
   }
   notification_email_from Alexandre.Cassen@firewall.loc
   smtp_server 192.168.200.1
   smtp_connect_timeout 30
   router_id LVS_DEVEL
}

vrrp_instance VI_1 {
    state MASTER
    interface ens33
    garp_master_delay 10
    smtp_alert
    virtual_router_id 51
    priority 100
    advert_int 1
    authentication {
        auth_type PASS
        auth_pass 1111
    }
    virtual_ipaddress {
        172.31.7.188 dev ens33 label ens33:0
        172.31.7.189 dev ens33 label ens33:1
        172.31.7.190 dev ens33 label ens33:2
        172.31.7.191 dev ens33 label ens33:3
        172.31.7.192 dev ens33 label ens33:4
    }
}

Note: the host NIC here is ens33; adjust the interface name to match your environment.

systemctl restart keepalived
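The virtual addresses should now be bound on the MASTER node; a quick check:

# Verify the VIPs landed on ens33
ip addr show ens33 | grep -E '172.31.7.(188|189|190|191|192)'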


harbor02 acts as the BACKUP node:

root@harbor02:~# cat /etc/keepalived/keepalived.conf
! Configuration File for keepalived

global_defs {
   notification_email {
     acassen
   }
   notification_email_from Alexandre.Cassen@firewall.loc
   smtp_server 192.168.200.1
   smtp_connect_timeout 30
   router_id LVS_DEVEL
}

vrrp_instance VI_1 {
    state BACKUP
    interface ens33
    garp_master_delay 10
    smtp_alert
    virtual_router_id 51
    priority 10
    advert_int 1
    authentication {
        auth_type PASS
        auth_pass 1111
    }
    virtual_ipaddress {
        172.31.7.188 dev ens33 label ens33:0
        172.31.7.189 dev ens33 label ens33:1
        172.31.7.190 dev ens33 label ens33:2
        172.31.7.191 dev ens33 label ens33:3
        172.31.7.192 dev ens33 label ens33:4
    }
}

Enable automatic start on boot

systemctl restart keepalived
systemctl enable keepalived

Configure haproxy

Run on the harbor01 node (mirror the same configuration on harbor02 so the service survives a failover).

apt install haproxy

vim /etc/haproxy/haproxy.cfg

listen k8s_api_nodes_6443
 bind 172.31.7.188:6443
 mode tcp
 #balance leastconn
 server 172.31.7.101 172.31.7.101:6443 check inter 2000 fall 3 rise 5
 server 172.31.7.102 172.31.7.102:6443 check inter 2000 fall 3 rise 5
 server 172.31.7.103 172.31.7.103:6443 check inter 2000 fall 3 rise 5

This listens on the 172.31.7.188 VIP and forwards requests to the three K8S master nodes.
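Before restarting, the file can be syntax-checked with haproxy's built-in validation:

# -c validates the configuration without starting the proxy
haproxy -c -f /etc/haproxy/haproxy.cfg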

systemctl restart haproxy.service
systemctl enable haproxy.service

Restart all containers with one command

for line in `docker ps -aq`;do echo docker restart $line; docker restart $line;done

Install Ansible

harbor02 acts as the deployment node

apt update 

apt install python3-pip git -y

pip3 install ansible -i https://mirrors.aliyun.com/pypi/simple/

Download the Deployment Project and Components with ezdown

Run on harbor02, the deployment node

cd /opt/
export release=3.2.0

wget https://github.com/easzlab/kubeasz/releases/download/${release}/ezdown

chmod a+x ezdown

## Download all installation resources
./ezdown -D


Generate a cluster configuration (ezdown extracts kubeasz into /etc/kubeasz):

cd /etc/kubeasz;./ezctl new k8s-01

#INFO next steps 1: to config '/etc/kubeasz/clusters/k8s-01/hosts'
#INFO next steps 2: to config '/etc/kubeasz/clusters/k8s-01/config.yml'

Configure the Ansible hosts file

vim /etc/kubeasz/clusters/k8s-01/hosts

[etcd]
172.31.7.106
172.31.7.107
172.31.7.108

# master node(s)
[kube_master]
172.31.7.101
172.31.7.102

# work node(s)
[kube_node]
172.31.7.111
172.31.7.112


CONTAINER_RUNTIME="docker"

SERVICE_CIDR="10.100.0.0/16"

CLUSTER_CIDR="10.200.0.0/16"

NODE_PORT_RANGE="30000-60000"

CLUSTER_DNS_DOMAIN="magedu.local"

bin_dir="/usr/local/bin"

Start with two masters and two nodes; we will scale out manually later.

Edit the config.yml file

vim /etc/kubeasz/clusters/k8s-01/config.yml

Modify the following options:

INSECURE_REG: '["127.0.0.1/8","harbor.magedu.com","harbor.linuxarchitect.io"]'

MASTER_CERT_HOSTS:
   - "api.magedu.net"
   - "api.linuxarchitect.io"
   - "harbor.linuxarchitect.io"
   - "172.31.7.188"

MAX_PODS: 200


## Skip installing CoreDNS here; it is installed manually later
dns_install: "no"

ENABLE_LOCAL_DNS_CACHE: false

metricsserver_install: "no"

dashboard_install: "no"

HARBOR_SELF_SIGNED_CERT: false

View the setup steps

./ezctl help setup


Step 1: Initialize the base environment

./ezctl setup k8s-01 01

Step 2: Deploy the etcd cluster

./ezctl setup k8s-01 02

Verify (run on one of the etcd nodes):

export NODE_IPS="172.31.7.106 172.31.7.107 172.31.7.108"

for ip in ${NODE_IPS}; do
  ETCDCTL_API=3 /usr/local/bin/etcdctl --endpoints=https://${ip}:2379 --cacert=/etc/kubernetes/ssl/ca.pem --cert=/etc/kubernetes/ssl/etcd.pem --key=/etc/kubernetes/ssl/etcd-key.pem endpoint health
done
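The same loop pattern gives a fuller view with the standard endpoint status subcommand:

# Show leader, DB size and raft term per member in a table
for ip in ${NODE_IPS}; do
  ETCDCTL_API=3 /usr/local/bin/etcdctl --endpoints=https://${ip}:2379 --cacert=/etc/kubernetes/ssl/ca.pem --cert=/etc/kubernetes/ssl/etcd.pem --key=/etc/kubernetes/ssl/etcd-key.pem endpoint status --write-out=table
done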


Step 3: Configure Harbor

Run on the deployment node

On harbor01, first copy the certificate over to the deployment node (harbor02):
scp /apps/harbor/certs/harbor-ca.crt harbor02:/etc/kubeasz/roles/docker/files/

Edit /etc/kubeasz/roles/docker/tasks/main.yml and add two tasks:

### Additionally create the certificate storage directory
- block:
    - name: Prepare docker-related directories
      file: name={{ item }} state=directory
      with_items:
      - "{{ bin_dir }}"
      - "/etc/docker"
      - "/etc/bash_completion.d"
      - "/etc/docker/certs.d/harbor.linuxarchitect.io"

## Add a distribution task
    - name: Distribute the docker certificate
      copy: src=harbor-ca.crt dest=/etc/docker/certs.d/harbor.linuxarchitect.io/harbor-ca.crt mode=0644

./ezctl setup k8s-01 03

Step 4: Deploy the master nodes

On the master01 node, pull the pause image and push it to Harbor:

docker pull registry.aliyuncs.com/google_containers/pause:3.9

docker tag registry.aliyuncs.com/google_containers/pause:3.9 harbor.linuxarchitect.io/baseimages/pause:3.9

docker login harbor.linuxarchitect.io

docker push harbor.linuxarchitect.io/baseimages/pause:3.9


Back on the deployment node (harbor02):

Edit /etc/kubeasz/clusters/k8s-01/config.yml and change the pause image address:

# Original setting: SANDBOX_IMAGE: "easzlab/pause:3.6"
SANDBOX_IMAGE: "harbor.linuxarchitect.io/baseimages/pause:3.9"

cd /etc/kubeasz; ./ezctl setup k8s-01 04

The master nodes are now deployed.

Step 5: Deploy the worker nodes

cd /etc/kubeasz; ./ezctl setup k8s-01 05

Worker node deployment is complete.

Step 6: Deploy the network service

Log in to the master01 node and push the Calico images to Harbor:

docker pull calico/cni:v3.19.3
docker tag calico/cni:v3.19.3 harbor.linuxarchitect.io/baseimages/calico-cni:v3.19.3
docker push harbor.linuxarchitect.io/baseimages/calico-cni:v3.19.3

docker pull calico/pod2daemon-flexvol:v3.19.3
docker tag calico/pod2daemon-flexvol:v3.19.3 harbor.linuxarchitect.io/baseimages/calico-pod2daemon-flexvol:v3.19.3
docker push harbor.linuxarchitect.io/baseimages/calico-pod2daemon-flexvol:v3.19.3

docker pull calico/node:v3.19.3
docker tag calico/node:v3.19.3 harbor.linuxarchitect.io/baseimages/calico-node:v3.19.3
docker push harbor.linuxarchitect.io/baseimages/calico-node:v3.19.3

docker pull calico/kube-controllers:v3.19.3
docker tag calico/kube-controllers:v3.19.3 harbor.linuxarchitect.io/baseimages/calico-kube-controllers:v3.19.3
docker push harbor.linuxarchitect.io/baseimages/calico-kube-controllers:v3.19.3
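The four pull/tag/push sequences follow one pattern, so they can also be scripted; a minimal sketch assuming the same image list and registry path:

# Mirror the four Calico v3.19.3 images into the Harbor baseimages project
for img in cni pod2daemon-flexvol node kube-controllers; do
  docker pull calico/${img}:v3.19.3
  docker tag calico/${img}:v3.19.3 harbor.linuxarchitect.io/baseimages/calico-${img}:v3.19.3
  docker push harbor.linuxarchitect.io/baseimages/calico-${img}:v3.19.3
done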

Back on the deployment node (harbor02), edit roles/calico/templates/calico-v3.19.yaml.j2 and change the Calico image addresses:

grep image roles/calico/templates/calico-v3.19.yaml.j2
212         - name: install-cni
213           #image: docker.io/calico/cni:{{ calico_ver }}
214           image: harbor.linuxarchitect.io/baseimages/calico-cni:v3.19.3
215           command: ["/opt/cni/bin/install"]
216           envFrom:



257         - name: flexvol-driver
258           #image: docker.io/calico/pod2daemon-flexvol:{{ calico_ver }}
259           image: harbor.linuxarchitect.io/baseimages/calico-pod2daemon-flexvol:v3.19.3
260           volumeMounts:


269         - name: calico-node
270           #image: docker.io/calico/node:{{ calico_ver }}
271           image: harbor.linuxarchitect.io/baseimages/calico-node:v3.19.3
272           envFrom:

Replace all of them with the Harbor-hosted images.

cd /etc/kubeasz; ./ezctl setup k8s-01 06

Check the network plugin

calicoctl node status
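Besides the BGP peer table, the Calico pods themselves should all be Running:

# calico-node runs as a DaemonSet on every node, kube-controllers as a Deployment
kubectl get pods -n kube-system -o wide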


Deploy CoreDNS

Run on the master01 node

vim coredns-1.8.6.yaml

# __MACHINE_GENERATED_WARNING__

apiVersion: v1
kind: ServiceAccount
metadata:
  name: coredns
  namespace: kube-system
  labels:
      kubernetes.io/cluster-service: "true"
      addonmanager.kubernetes.io/mode: Reconcile
---
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRole
metadata:
  labels:
    kubernetes.io/bootstrapping: rbac-defaults
    addonmanager.kubernetes.io/mode: Reconcile
  name: system:coredns
rules:
- apiGroups:
  - ""
  resources:
  - endpoints
  - services
  - pods
  - namespaces
  verbs:
  - list
  - watch
- apiGroups:
  - ""
  resources:
  - nodes
  verbs:
  - get

- apiGroups:
  - discovery.k8s.io
  resources:
  - endpointslices
  verbs:
  - list
  - watch
---
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRoleBinding
metadata:
  annotations:
    rbac.authorization.kubernetes.io/autoupdate: "true"
  labels:
    kubernetes.io/bootstrapping: rbac-defaults
    addonmanager.kubernetes.io/mode: EnsureExists
  name: system:coredns
roleRef:
  apiGroup: rbac.authorization.k8s.io
  kind: ClusterRole
  name: system:coredns
subjects:
- kind: ServiceAccount
  name: coredns
  namespace: kube-system
---
apiVersion: v1
kind: ConfigMap
metadata:
  name: coredns
  namespace: kube-system
  labels:
      addonmanager.kubernetes.io/mode: EnsureExists
data:
  Corefile: |
    .:53 {
        errors
        health {
            lameduck 5s
        }
        ready
        kubernetes magedu.local. in-addr.arpa ip6.arpa {
            pods insecure
            fallthrough in-addr.arpa ip6.arpa
            ttl 30
        }
        prometheus :9153
        forward . 8.8.8.8 {
            max_concurrent 1000
        }
        cache 30
        loop
        reload
        loadbalance
    }
---
apiVersion: apps/v1
kind: Deployment
metadata:
  name: coredns
  namespace: kube-system
  labels:
    k8s-app: kube-dns
    kubernetes.io/cluster-service: "true"
    addonmanager.kubernetes.io/mode: Reconcile
    kubernetes.io/name: "CoreDNS"
spec:
  # replicas: not specified here:
  # 1. In order to make Addon Manager do not reconcile this replicas parameter.
  # 2. Default is 1.
  # 3. Will be tuned in real time if DNS horizontal auto-scaling is turned on.
  strategy:
    type: RollingUpdate
    rollingUpdate:
      maxUnavailable: 1
  selector:
    matchLabels:
      k8s-app: kube-dns
  template:
    metadata:
      labels:
        k8s-app: kube-dns
    spec:
      securityContext:
        seccompProfile:
          type: RuntimeDefault
      priorityClassName: system-cluster-critical
      serviceAccountName: coredns
      affinity:
        podAntiAffinity:
          preferredDuringSchedulingIgnoredDuringExecution:
          - weight: 100
            podAffinityTerm:
              labelSelector:
                matchExpressions:
                  - key: k8s-app
                    operator: In
                    values: ["kube-dns"]
              topologyKey: kubernetes.io/hostname
      tolerations:
        - key: "CriticalAddonsOnly"
          operator: "Exists"
      nodeSelector:
        kubernetes.io/os: linux
      containers:
      - name: coredns
        #image: k8s.gcr.io/coredns/coredns:v1.8.0
        image: coredns/coredns:1.8.6 
        imagePullPolicy: IfNotPresent
        resources:
          limits:
            memory: 512Mi
          requests:
            cpu: 100m
            memory: 70Mi
        args: [ "-conf", "/etc/coredns/Corefile" ]
        volumeMounts:
        - name: config-volume
          mountPath: /etc/coredns
          readOnly: true
        ports:
        - containerPort: 53
          name: dns
          protocol: UDP
        - containerPort: 53
          name: dns-tcp
          protocol: TCP
        - containerPort: 9153
          name: metrics
          protocol: TCP
        livenessProbe:
          httpGet:
            path: /health
            port: 8080
            scheme: HTTP
          initialDelaySeconds: 60
          timeoutSeconds: 5
          successThreshold: 1
          failureThreshold: 5
        readinessProbe:
          httpGet:
            path: /ready
            port: 8181
            scheme: HTTP
        securityContext:
          allowPrivilegeEscalation: false
          capabilities:
            add:
            - NET_BIND_SERVICE
            drop:
            - all
          readOnlyRootFilesystem: true
      dnsPolicy: Default
      volumes:
        - name: config-volume
          configMap:
            name: coredns
            items:
            - key: Corefile
              path: Corefile
---
apiVersion: v1
kind: Service
metadata:
  name: kube-dns
  namespace: kube-system
  annotations:
    prometheus.io/port: "9153"
    prometheus.io/scrape: "true"
  labels:
    k8s-app: kube-dns
    kubernetes.io/cluster-service: "true"
    addonmanager.kubernetes.io/mode: Reconcile
    kubernetes.io/name: "CoreDNS"
spec:
  type: NodePort
  selector:
    k8s-app: kube-dns
  clusterIP: 10.100.0.2 
  ports:
  - name: dns
    port: 53
    protocol: UDP
  - name: dns-tcp
    port: 53
    protocol: TCP
  - name: metrics
    port: 9153
    protocol: TCP
    nodePort: 30009

kubectl apply -f coredns-1.8.6.yaml
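A quick resolution test confirms the magedu.local zone and the 10.100.0.2 service IP are answering; a sketch assuming the busybox:1.28 image is pullable from the nodes:

# Resolve the kubernetes service through the new CoreDNS
kubectl run -it --rm dns-test --image=busybox:1.28 -- nslookup kubernetes.default.svc.magedu.local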


Deploy the Dashboard

The official front-end UI component, a web management interface for Kubernetes: github.com/kubernetes/…

On the master01 node, pull the images and push them to the Harbor registry:

docker pull kubernetesui/dashboard:v2.5.1
docker tag kubernetesui/dashboard:v2.5.1 harbor.linuxarchitect.io/baseimages/dashboard:v2.5.1
docker push harbor.linuxarchitect.io/baseimages/dashboard:v2.5.1

docker pull kubernetesui/metrics-scraper:v1.0.8
docker tag kubernetesui/metrics-scraper:v1.0.8 harbor.linuxarchitect.io/baseimages/metrics-scraper:v1.0.8
docker push harbor.linuxarchitect.io/baseimages/metrics-scraper:v1.0.8

vim dashboard-v2.5.1.yaml

# Copyright 2017 The Kubernetes Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

apiVersion: v1
kind: Namespace
metadata:
  name: kubernetes-dashboard

---

apiVersion: v1
kind: ServiceAccount
metadata:
  labels:
    k8s-app: kubernetes-dashboard
  name: kubernetes-dashboard
  namespace: kubernetes-dashboard

---

kind: Service
apiVersion: v1
metadata:
  labels:
    k8s-app: kubernetes-dashboard
  name: kubernetes-dashboard
  namespace: kubernetes-dashboard
spec:
  type: NodePort
  ports:
    - port: 443
      targetPort: 8443
      nodePort: 30002
  selector:
    k8s-app: kubernetes-dashboard

---

apiVersion: v1
kind: Secret
metadata:
  labels:
    k8s-app: kubernetes-dashboard
  name: kubernetes-dashboard-certs
  namespace: kubernetes-dashboard
type: Opaque

---

apiVersion: v1
kind: Secret
metadata:
  labels:
    k8s-app: kubernetes-dashboard
  name: kubernetes-dashboard-csrf
  namespace: kubernetes-dashboard
type: Opaque
data:
  csrf: ""

---

apiVersion: v1
kind: Secret
metadata:
  labels:
    k8s-app: kubernetes-dashboard
  name: kubernetes-dashboard-key-holder
  namespace: kubernetes-dashboard
type: Opaque

---

kind: ConfigMap
apiVersion: v1
metadata:
  labels:
    k8s-app: kubernetes-dashboard
  name: kubernetes-dashboard-settings
  namespace: kubernetes-dashboard

---

kind: Role
apiVersion: rbac.authorization.k8s.io/v1
metadata:
  labels:
    k8s-app: kubernetes-dashboard
  name: kubernetes-dashboard
  namespace: kubernetes-dashboard
rules:
  # Allow Dashboard to get, update and delete Dashboard exclusive secrets.
  - apiGroups: [""]
    resources: ["secrets"]
    resourceNames: ["kubernetes-dashboard-key-holder", "kubernetes-dashboard-certs", "kubernetes-dashboard-csrf"]
    verbs: ["get", "update", "delete"]
    # Allow Dashboard to get and update 'kubernetes-dashboard-settings' config map.
  - apiGroups: [""]
    resources: ["configmaps"]
    resourceNames: ["kubernetes-dashboard-settings"]
    verbs: ["get", "update"]
    # Allow Dashboard to get metrics.
  - apiGroups: [""]
    resources: ["services"]
    resourceNames: ["heapster", "dashboard-metrics-scraper"]
    verbs: ["proxy"]
  - apiGroups: [""]
    resources: ["services/proxy"]
    resourceNames: ["heapster", "http:heapster:", "https:heapster:", "dashboard-metrics-scraper", "http:dashboard-metrics-scraper"]
    verbs: ["get"]

---

kind: ClusterRole
apiVersion: rbac.authorization.k8s.io/v1
metadata:
  labels:
    k8s-app: kubernetes-dashboard
  name: kubernetes-dashboard
rules:
  # Allow Metrics Scraper to get metrics from the Metrics server
  - apiGroups: ["metrics.k8s.io"]
    resources: ["pods", "nodes"]
    verbs: ["get", "list", "watch"]

---

apiVersion: rbac.authorization.k8s.io/v1
kind: RoleBinding
metadata:
  labels:
    k8s-app: kubernetes-dashboard
  name: kubernetes-dashboard
  namespace: kubernetes-dashboard
roleRef:
  apiGroup: rbac.authorization.k8s.io
  kind: Role
  name: kubernetes-dashboard
subjects:
  - kind: ServiceAccount
    name: kubernetes-dashboard
    namespace: kubernetes-dashboard

---

apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRoleBinding
metadata:
  name: kubernetes-dashboard
roleRef:
  apiGroup: rbac.authorization.k8s.io
  kind: ClusterRole
  name: kubernetes-dashboard
subjects:
  - kind: ServiceAccount
    name: kubernetes-dashboard
    namespace: kubernetes-dashboard

---

kind: Deployment
apiVersion: apps/v1
metadata:
  labels:
    k8s-app: kubernetes-dashboard
  name: kubernetes-dashboard
  namespace: kubernetes-dashboard
spec:
  replicas: 1
  revisionHistoryLimit: 10
  selector:
    matchLabels:
      k8s-app: kubernetes-dashboard
  template:
    metadata:
      labels:
        k8s-app: kubernetes-dashboard
    spec:
      securityContext:
        seccompProfile:
          type: RuntimeDefault
      containers:
        - name: kubernetes-dashboard
          image: harbor.linuxarchitect.io/baseimages/dashboard:v2.5.1 
          imagePullPolicy: Always
          ports:
            - containerPort: 8443
              protocol: TCP
          args:
            - --auto-generate-certificates
            - --namespace=kubernetes-dashboard
            - --token-ttl=43200
            # Uncomment the following line to manually specify Kubernetes API server Host
            # If not specified, Dashboard will attempt to auto discover the API server and connect
            # to it. Uncomment only if the default does not work.
            # - --apiserver-host=http://my-address:port
          volumeMounts:
            - name: kubernetes-dashboard-certs
              mountPath: /certs
              # Create on-disk volume to store exec logs
            - mountPath: /tmp
              name: tmp-volume
          livenessProbe:
            httpGet:
              scheme: HTTPS
              path: /
              port: 8443
            initialDelaySeconds: 30
            timeoutSeconds: 30
          securityContext:
            allowPrivilegeEscalation: false
            readOnlyRootFilesystem: true
            runAsUser: 1001
            runAsGroup: 2001
      volumes:
        - name: kubernetes-dashboard-certs
          secret:
            secretName: kubernetes-dashboard-certs
        - name: tmp-volume
          emptyDir: {}
      serviceAccountName: kubernetes-dashboard
      nodeSelector:
        "kubernetes.io/os": linux
      # Comment the following tolerations if Dashboard must not be deployed on master
      tolerations:
        - key: node-role.kubernetes.io/master
          effect: NoSchedule

---

kind: Service
apiVersion: v1
metadata:
  labels:
    k8s-app: dashboard-metrics-scraper
  name: dashboard-metrics-scraper
  namespace: kubernetes-dashboard
spec:
  ports:
    - port: 8000
      targetPort: 8000
  selector:
    k8s-app: dashboard-metrics-scraper

---

kind: Deployment
apiVersion: apps/v1
metadata:
  labels:
    k8s-app: dashboard-metrics-scraper
  name: dashboard-metrics-scraper
  namespace: kubernetes-dashboard
spec:
  replicas: 1
  revisionHistoryLimit: 10
  selector:
    matchLabels:
      k8s-app: dashboard-metrics-scraper
  template:
    metadata:
      labels:
        k8s-app: dashboard-metrics-scraper
    spec:
      securityContext:
        seccompProfile:
          type: RuntimeDefault
      containers:
        - name: dashboard-metrics-scraper
          image: harbor.linuxarchitect.io/baseimages/metrics-scraper:v1.0.8 
          ports:
            - containerPort: 8000
              protocol: TCP
          livenessProbe:
            httpGet:
              scheme: HTTP
              path: /
              port: 8000
            initialDelaySeconds: 30
            timeoutSeconds: 30
          volumeMounts:
          - mountPath: /tmp
            name: tmp-volume
          securityContext:
            allowPrivilegeEscalation: false
            readOnlyRootFilesystem: true
            runAsUser: 1001
            runAsGroup: 2001
      serviceAccountName: kubernetes-dashboard
      nodeSelector:
        "kubernetes.io/os": linux
      # Comment the following tolerations if Dashboard must not be deployed on master
      tolerations:
        - key: node-role.kubernetes.io/master
          effect: NoSchedule
      volumes:
        - name: tmp-volume
          emptyDir: {}

kubectl apply -f dashboard-v2.5.1.yaml
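Check that both Deployments come up and the NodePort is exposed:

# Expect kubernetes-dashboard (NodePort 30002) and dashboard-metrics-scraper running
kubectl get pods,svc -n kubernetes-dashboard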


Create an admin user: vim admin-user.yaml

apiVersion: v1
kind: ServiceAccount
metadata:
  name: admin-user
  namespace: kubernetes-dashboard

---
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRoleBinding
metadata:
  name: admin-user
roleRef:
  apiGroup: rbac.authorization.k8s.io
  kind: ClusterRole
  name: cluster-admin
subjects:
- kind: ServiceAccount
  name: admin-user
  namespace: kubernetes-dashboard

kubectl apply -f admin-user.yaml


Log in to the Dashboard with a token:

kubectl get secret -A | grep admin

root@master01:~# kubectl get secret -A | grep admin
kubernetes-dashboard   admin-user-token-9q5b6                           kubernetes.io/service-account-token   3      2m32s
root@master01:~# kubectl describe secret admin-user-token-9q5b6 -n kubernetes-dashboard
kubectl get svc -n kubernetes-dashboard
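The token secret above exists because this kubeasz release deploys a pre-1.24 Kubernetes; on v1.24+ clusters ServiceAccount token secrets are no longer auto-created, and a short-lived token would be requested instead:

# Only needed on Kubernetes v1.24 or newer
kubectl -n kubernetes-dashboard create token admin-user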


Visit https://172.31.7.101:30002/

On a Mac, Chrome may require blind-typing thisisunsafe to bypass the self-signed certificate warning.


Then paste in the token obtained above.


Scaling Cluster Nodes

Run the following on the deployment node (harbor02).


Add a master node

./ezctl add-master k8s-01 172.31.7.103


Add a worker node

./ezctl add-node k8s-01 172.31.7.113


Remove a worker node

./ezctl del-node k8s-01 172.31.7.113

Remove a master node

./ezctl del-master k8s-01 172.31.7.103
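After any of these scale operations, confirm membership from a master node:

# Added or removed nodes should appear or disappear here
kubectl get nodes -o wide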
