14. Install Calico
14.1 Install Calico
[root@ansible-server ansible]# mkdir -p roles/calico/{tasks,vars,templates}
[root@ansible-server ansible]# cd roles/calico
[root@ansible-server calico]# ls
tasks templates vars
# Set HARBOR_DOMAIN below to your own Harbor domain name, and set POD_SUBNET to your planned Pod network CIDR
[root@ansible-server calico]# vim vars/main.yml
HARBOR_DOMAIN: harbor.raymonds.cc
USERNAME: admin
PASSWORD: 123456
POD_SUBNET: 192.168.0.0/12
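POD_SUBNET must fall within the --cluster-cidr that kube-controller-manager and kube-proxy were started with. A quick sanity check on a master node might look like this (the unit-file locations are assumptions based on a typical binary installation; adjust the paths to your environment):
[root@k8s-master01 ~]# grep -r "cluster-cidr" /etc/systemd/system/ /usr/lib/systemd/system/ 2>/dev/null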
[root@ansible-server calico]# cat templates/calico-etcd.yaml.j2
---
# Source: calico/templates/calico-etcd-secrets.yaml
# The following contains k8s Secrets for use with a TLS enabled etcd cluster.
# For information on populating Secrets, see http://kubernetes.io/docs/user-guide/secrets/
apiVersion: v1
kind: Secret
type: Opaque
metadata:
name: calico-etcd-secrets
namespace: kube-system
data:
# Populate the following with etcd TLS configuration if desired, but leave blank if
# not using TLS for etcd.
# The keys below should be uncommented and the values populated with the base64
# encoded contents of each file that would be associated with the TLS data.
# Example command for encoding a file contents: cat <file> | base64 -w 0
# etcd-key: null
# etcd-cert: null
# etcd-ca: null
---
# Source: calico/templates/calico-config.yaml
# This ConfigMap is used to configure a self-hosted Calico installation.
kind: ConfigMap
apiVersion: v1
metadata:
name: calico-config
namespace: kube-system
data:
# Configure this with the location of your etcd cluster.
etcd_endpoints: "http://<ETCD_IP>:<ETCD_PORT>"
# If you're using TLS enabled etcd uncomment the following.
# You must also populate the Secret below with these files.
etcd_ca: "" # "/calico-secrets/etcd-ca"
etcd_cert: "" # "/calico-secrets/etcd-cert"
etcd_key: "" # "/calico-secrets/etcd-key"
# Typha is disabled.
typha_service_name: "none"
# Configure the backend to use.
calico_backend: "bird"
# Configure the MTU to use for workload interfaces and tunnels.
# By default, MTU is auto-detected, and explicitly setting this field should not be required.
# You can override auto-detection by providing a non-zero value.
veth_mtu: "0"
# The CNI network configuration to install on each node. The special
# values in this config will be automatically populated.
cni_network_config: |-
{
"name": "k8s-pod-network",
"cniVersion": "0.3.1",
"plugins": [
{
"type": "calico",
"log_level": "info",
"log_file_path": "/var/log/calico/cni/cni.log",
"etcd_endpoints": "__ETCD_ENDPOINTS__",
"etcd_key_file": "__ETCD_KEY_FILE__",
"etcd_cert_file": "__ETCD_CERT_FILE__",
"etcd_ca_cert_file": "__ETCD_CA_CERT_FILE__",
"mtu": __CNI_MTU__,
"ipam": {
"type": "calico-ipam"
},
"policy": {
"type": "k8s"
},
"kubernetes": {
"kubeconfig": "__KUBECONFIG_FILEPATH__"
}
},
{
"type": "portmap",
"snat": true,
"capabilities": {"portMappings": true}
},
{
"type": "bandwidth",
"capabilities": {"bandwidth": true}
}
]
}
---
# Source: calico/templates/calico-kube-controllers-rbac.yaml
# Include a clusterrole for the kube-controllers component,
# and bind it to the calico-kube-controllers serviceaccount.
kind: ClusterRole
apiVersion: rbac.authorization.k8s.io/v1
metadata:
name: calico-kube-controllers
rules:
# Pods are monitored for changing labels.
# The node controller monitors Kubernetes nodes.
# Namespace and serviceaccount labels are used for policy.
- apiGroups: [""]
resources:
- pods
- nodes
- namespaces
- serviceaccounts
verbs:
- watch
- list
- get
# Watch for changes to Kubernetes NetworkPolicies.
- apiGroups: ["networking.k8s.io"]
resources:
- networkpolicies
verbs:
- watch
- list
---
kind: ClusterRoleBinding
apiVersion: rbac.authorization.k8s.io/v1
metadata:
name: calico-kube-controllers
roleRef:
apiGroup: rbac.authorization.k8s.io
kind: ClusterRole
name: calico-kube-controllers
subjects:
- kind: ServiceAccount
name: calico-kube-controllers
namespace: kube-system
---
---
# Source: calico/templates/calico-node-rbac.yaml
# Include a clusterrole for the calico-node DaemonSet,
# and bind it to the calico-node serviceaccount.
kind: ClusterRole
apiVersion: rbac.authorization.k8s.io/v1
metadata:
name: calico-node
rules:
# The CNI plugin needs to get pods, nodes, and namespaces.
- apiGroups: [""]
resources:
- pods
- nodes
- namespaces
verbs:
- get
# EndpointSlices are used for Service-based network policy rule
# enforcement.
- apiGroups: ["discovery.k8s.io"]
resources:
- endpointslices
verbs:
- watch
- list
- apiGroups: [""]
resources:
- endpoints
- services
verbs:
# Used to discover service IPs for advertisement.
- watch
- list
# Pod CIDR auto-detection on kubeadm needs access to config maps.
- apiGroups: [""]
resources:
- configmaps
verbs:
- get
- apiGroups: [""]
resources:
- nodes/status
verbs:
# Needed for clearing NodeNetworkUnavailable flag.
- patch
---
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRoleBinding
metadata:
name: calico-node
roleRef:
apiGroup: rbac.authorization.k8s.io
kind: ClusterRole
name: calico-node
subjects:
- kind: ServiceAccount
name: calico-node
namespace: kube-system
---
# Source: calico/templates/calico-node.yaml
# This manifest installs the calico-node container, as well
# as the CNI plugins and network config on
# each master and worker node in a Kubernetes cluster.
kind: DaemonSet
apiVersion: apps/v1
metadata:
name: calico-node
namespace: kube-system
labels:
k8s-app: calico-node
spec:
selector:
matchLabels:
k8s-app: calico-node
updateStrategy:
type: RollingUpdate
rollingUpdate:
maxUnavailable: 1
template:
metadata:
labels:
k8s-app: calico-node
spec:
nodeSelector:
kubernetes.io/os: linux
hostNetwork: true
tolerations:
# Make sure calico-node gets scheduled on all nodes.
- effect: NoSchedule
operator: Exists
# Mark the pod as a critical add-on for rescheduling.
- key: CriticalAddonsOnly
operator: Exists
- effect: NoExecute
operator: Exists
serviceAccountName: calico-node
# Minimize downtime during a rolling upgrade or deletion; tell Kubernetes to do a "force
# deletion": https://kubernetes.io/docs/concepts/workloads/pods/pod/#termination-of-pods.
terminationGracePeriodSeconds: 0
priorityClassName: system-node-critical
initContainers:
# This container installs the CNI binaries
# and CNI network config file on each node.
- name: install-cni
image: docker.io/calico/cni:v3.21.4
command: ["/opt/cni/bin/install"]
envFrom:
- configMapRef:
# Allow KUBERNETES_SERVICE_HOST and KUBERNETES_SERVICE_PORT to be overridden for eBPF mode.
name: kubernetes-services-endpoint
optional: true
env:
# Name of the CNI config file to create.
- name: CNI_CONF_NAME
value: "10-calico.conflist"
# The CNI network config to install on each node.
- name: CNI_NETWORK_CONFIG
valueFrom:
configMapKeyRef:
name: calico-config
key: cni_network_config
# The location of the etcd cluster.
- name: ETCD_ENDPOINTS
valueFrom:
configMapKeyRef:
name: calico-config
key: etcd_endpoints
# CNI MTU Config variable
- name: CNI_MTU
valueFrom:
configMapKeyRef:
name: calico-config
key: veth_mtu
# Prevents the container from sleeping forever.
- name: SLEEP
value: "false"
volumeMounts:
- mountPath: /host/opt/cni/bin
name: cni-bin-dir
- mountPath: /host/etc/cni/net.d
name: cni-net-dir
- mountPath: /calico-secrets
name: etcd-certs
securityContext:
privileged: true
# Adds a Flex Volume Driver that creates a per-pod Unix Domain Socket to allow Dikastes
# to communicate with Felix over the Policy Sync API.
- name: flexvol-driver
image: docker.io/calico/pod2daemon-flexvol:v3.21.4
volumeMounts:
- name: flexvol-driver-host
mountPath: /host/driver
securityContext:
privileged: true
containers:
# Runs calico-node container on each Kubernetes node. This
# container programs network policy and routes on each
# host.
- name: calico-node
image: docker.io/calico/node:v3.21.4
envFrom:
- configMapRef:
# Allow KUBERNETES_SERVICE_HOST and KUBERNETES_SERVICE_PORT to be overridden for eBPF mode.
name: kubernetes-services-endpoint
optional: true
env:
# The location of the etcd cluster.
- name: ETCD_ENDPOINTS
valueFrom:
configMapKeyRef:
name: calico-config
key: etcd_endpoints
# Location of the CA certificate for etcd.
- name: ETCD_CA_CERT_FILE
valueFrom:
configMapKeyRef:
name: calico-config
key: etcd_ca
# Location of the client key for etcd.
- name: ETCD_KEY_FILE
valueFrom:
configMapKeyRef:
name: calico-config
key: etcd_key
# Location of the client certificate for etcd.
- name: ETCD_CERT_FILE
valueFrom:
configMapKeyRef:
name: calico-config
key: etcd_cert
# Set noderef for node controller.
- name: CALICO_K8S_NODE_REF
valueFrom:
fieldRef:
fieldPath: spec.nodeName
# Choose the backend to use.
- name: CALICO_NETWORKING_BACKEND
valueFrom:
configMapKeyRef:
name: calico-config
key: calico_backend
# Cluster type to identify the deployment type
- name: CLUSTER_TYPE
value: "k8s,bgp"
# Auto-detect the BGP IP address.
- name: IP
value: "autodetect"
# Enable IPIP
- name: CALICO_IPV4POOL_IPIP
value: "Always"
# Enable or Disable VXLAN on the default IP pool.
- name: CALICO_IPV4POOL_VXLAN
value: "Never"
# Set MTU for tunnel device used if ipip is enabled
- name: FELIX_IPINIPMTU
valueFrom:
configMapKeyRef:
name: calico-config
key: veth_mtu
# Set MTU for the VXLAN tunnel device.
- name: FELIX_VXLANMTU
valueFrom:
configMapKeyRef:
name: calico-config
key: veth_mtu
# Set MTU for the Wireguard tunnel device.
- name: FELIX_WIREGUARDMTU
valueFrom:
configMapKeyRef:
name: calico-config
key: veth_mtu
# The default IPv4 pool to create on startup if none exists. Pod IPs will be
# chosen from this range. Changing this value after installation will have
# no effect. This should fall within `--cluster-cidr`.
# - name: CALICO_IPV4POOL_CIDR
# value: "192.168.0.0/16"
# Disable file logging so `kubectl logs` works.
- name: CALICO_DISABLE_FILE_LOGGING
value: "true"
# Set Felix endpoint to host default action to ACCEPT.
- name: FELIX_DEFAULTENDPOINTTOHOSTACTION
value: "ACCEPT"
# Disable IPv6 on Kubernetes.
- name: FELIX_IPV6SUPPORT
value: "false"
- name: FELIX_HEALTHENABLED
value: "true"
securityContext:
privileged: true
resources:
requests:
cpu: 250m
lifecycle:
preStop:
exec:
command:
- /bin/calico-node
- -shutdown
livenessProbe:
exec:
command:
- /bin/calico-node
- -felix-live
- -bird-live
periodSeconds: 10
initialDelaySeconds: 10
failureThreshold: 6
timeoutSeconds: 10
readinessProbe:
exec:
command:
- /bin/calico-node
- -felix-ready
- -bird-ready
periodSeconds: 10
timeoutSeconds: 10
volumeMounts:
# For maintaining CNI plugin API credentials.
- mountPath: /host/etc/cni/net.d
name: cni-net-dir
readOnly: false
- mountPath: /lib/modules
name: lib-modules
readOnly: true
- mountPath: /run/xtables.lock
name: xtables-lock
readOnly: false
- mountPath: /var/run/calico
name: var-run-calico
readOnly: false
- mountPath: /var/lib/calico
name: var-lib-calico
readOnly: false
- mountPath: /calico-secrets
name: etcd-certs
- name: policysync
mountPath: /var/run/nodeagent
# For eBPF mode, we need to be able to mount the BPF filesystem at /sys/fs/bpf so we mount in the
# parent directory.
- name: sysfs
mountPath: /sys/fs/
# Bidirectional means that, if we mount the BPF filesystem at /sys/fs/bpf it will propagate to the host.
# If the host is known to mount that filesystem already then Bidirectional can be omitted.
mountPropagation: Bidirectional
- name: cni-log-dir
mountPath: /var/log/calico/cni
readOnly: true
volumes:
# Used by calico-node.
- name: lib-modules
hostPath:
path: /lib/modules
- name: var-run-calico
hostPath:
path: /var/run/calico
- name: var-lib-calico
hostPath:
path: /var/lib/calico
- name: xtables-lock
hostPath:
path: /run/xtables.lock
type: FileOrCreate
- name: sysfs
hostPath:
path: /sys/fs/
type: DirectoryOrCreate
# Used to install CNI.
- name: cni-bin-dir
hostPath:
path: /opt/cni/bin
- name: cni-net-dir
hostPath:
path: /etc/cni/net.d
# Used to access CNI logs.
- name: cni-log-dir
hostPath:
path: /var/log/calico/cni
# Mount in the etcd TLS secrets with mode 400.
# See https://kubernetes.io/docs/concepts/configuration/secret/
- name: etcd-certs
secret:
secretName: calico-etcd-secrets
defaultMode: 0400
# Used to create per-pod Unix Domain Sockets
- name: policysync
hostPath:
type: DirectoryOrCreate
path: /var/run/nodeagent
# Used to install Flex Volume Driver
- name: flexvol-driver-host
hostPath:
type: DirectoryOrCreate
path: /usr/libexec/kubernetes/kubelet-plugins/volume/exec/nodeagent~uds
---
apiVersion: v1
kind: ServiceAccount
metadata:
name: calico-node
namespace: kube-system
---
# Source: calico/templates/calico-kube-controllers.yaml
# See https://github.com/projectcalico/kube-controllers
apiVersion: apps/v1
kind: Deployment
metadata:
name: calico-kube-controllers
namespace: kube-system
labels:
k8s-app: calico-kube-controllers
spec:
# The controllers can only have a single active instance.
replicas: 1
selector:
matchLabels:
k8s-app: calico-kube-controllers
strategy:
type: Recreate
template:
metadata:
name: calico-kube-controllers
namespace: kube-system
labels:
k8s-app: calico-kube-controllers
spec:
nodeSelector:
kubernetes.io/os: linux
tolerations:
# Mark the pod as a critical add-on for rescheduling.
- key: CriticalAddonsOnly
operator: Exists
- key: node-role.kubernetes.io/master
effect: NoSchedule
serviceAccountName: calico-kube-controllers
priorityClassName: system-cluster-critical
# The controllers must run in the host network namespace so that
# it isn't governed by policy that would prevent it from working.
hostNetwork: true
containers:
- name: calico-kube-controllers
image: docker.io/calico/kube-controllers:v3.21.4
env:
# The location of the etcd cluster.
- name: ETCD_ENDPOINTS
valueFrom:
configMapKeyRef:
name: calico-config
key: etcd_endpoints
# Location of the CA certificate for etcd.
- name: ETCD_CA_CERT_FILE
valueFrom:
configMapKeyRef:
name: calico-config
key: etcd_ca
# Location of the client key for etcd.
- name: ETCD_KEY_FILE
valueFrom:
configMapKeyRef:
name: calico-config
key: etcd_key
# Location of the client certificate for etcd.
- name: ETCD_CERT_FILE
valueFrom:
configMapKeyRef:
name: calico-config
key: etcd_cert
# Choose which controllers to run.
- name: ENABLED_CONTROLLERS
value: policy,namespace,serviceaccount,workloadendpoint,node
volumeMounts:
# Mount in the etcd TLS secrets.
- mountPath: /calico-secrets
name: etcd-certs
livenessProbe:
exec:
command:
- /usr/bin/check-status
- -l
periodSeconds: 10
initialDelaySeconds: 10
failureThreshold: 6
timeoutSeconds: 10
readinessProbe:
exec:
command:
- /usr/bin/check-status
- -r
periodSeconds: 10
volumes:
# Mount in the etcd TLS secrets with mode 400.
# See https://kubernetes.io/docs/concepts/configuration/secret/
- name: etcd-certs
secret:
secretName: calico-etcd-secrets
defaultMode: 0440
---
apiVersion: v1
kind: ServiceAccount
metadata:
name: calico-kube-controllers
namespace: kube-system
---
# This manifest creates a Pod Disruption Budget for Controller to allow K8s Cluster Autoscaler to evict
apiVersion: policy/v1beta1
kind: PodDisruptionBudget
metadata:
name: calico-kube-controllers
namespace: kube-system
labels:
k8s-app: calico-kube-controllers
spec:
maxUnavailable: 1
selector:
matchLabels:
k8s-app: calico-kube-controllers
---
# Source: calico/templates/calico-typha.yaml
---
# Source: calico/templates/configure-canal.yaml
---
# Source: calico/templates/kdd-crds.yaml
# Modify the following:
[root@ansible-server calico]# grep "etcd_endpoints:.*" templates/calico-etcd.yaml.j2
etcd_endpoints: "http://<ETCD_IP>:<ETCD_PORT>"
[root@ansible-server calico]# sed -i 's#etcd_endpoints: "http://<ETCD_IP>:<ETCD_PORT>"#etcd_endpoints: "{% for i in groups.etcd %}https://{{ hostvars[i].ansible_default_ipv4.address }}:2379{% if not loop.last %},{% endif %}{% endfor %}"#g' templates/calico-etcd.yaml.j2
[root@ansible-server calico]# grep "etcd_endpoints:.*" templates/calico-etcd.yaml.j2
etcd_endpoints: "{% for i in groups.etcd %}https://{{ hostvars[i].ansible_default_ipv4.address }}:2379{% if not loop.last %},{% endif %}{% endfor %}"
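For reference, on a hypothetical three-node etcd group (the 172.31.3.108-110 addresses below are illustrative only), the Jinja2 expression above renders to a single comma-separated list:
etcd_endpoints: "https://172.31.3.108:2379,https://172.31.3.109:2379,https://172.31.3.110:2379"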
[root@ansible-server calico]# vim tasks/calico_file.yml
- name: copy calico-etcd.yaml file
template:
src: calico-etcd.yaml.j2
dest: /root/calico-etcd.yaml
when:
- ansible_hostname=="k8s-master01"
[root@ansible-server calico]# vim tasks/config.yml
- name: get ETCD_KEY key
shell:
cmd: cat /etc/kubernetes/pki/etcd/etcd-key.pem | base64 | tr -d '\n'
register: ETCD_KEY
when:
- ansible_hostname=="k8s-master01"
- name: Modify the ".*etcd-key:.*" line
replace:
path: /root/calico-etcd.yaml
regexp: '# (etcd-key:) null'
replace: '\1 {{ ETCD_KEY.stdout }}'
when:
- ansible_hostname=="k8s-master01"
- name: get ETCD_CERT key
shell:
cmd: cat /etc/kubernetes/pki/etcd/etcd.pem | base64 | tr -d '\n'
register: ETCD_CERT
when:
- ansible_hostname=="k8s-master01"
- name: Modify the ".*etcd-cert:.*" line
replace:
path: /root/calico-etcd.yaml
regexp: '# (etcd-cert:) null'
replace: '\1 {{ ETCD_CERT.stdout }}'
when:
- ansible_hostname=="k8s-master01"
- name: get ETCD_CA key
shell:
cmd: cat /etc/kubernetes/pki/etcd/etcd-ca.pem | base64 | tr -d '\n'
when:
- ansible_hostname=="k8s-master01"
register: ETCD_CA
- name: Modify the ".*etcd-ca:.*" line
replace:
path: /root/calico-etcd.yaml
regexp: '# (etcd-ca:) null'
replace: '\1 {{ ETCD_CA.stdout }}'
when:
- ansible_hostname=="k8s-master01"
- name: Modify the ".*etcd_ca:.*" line
replace:
path: /root/calico-etcd.yaml
regexp: '(etcd_ca:) ""'
replace: '\1 "/calico-secrets/etcd-ca"'
when:
- ansible_hostname=="k8s-master01"
- name: Modify the ".*etcd_cert:.*" line
replace:
path: /root/calico-etcd.yaml
regexp: '(etcd_cert:) ""'
replace: '\1 "/calico-secrets/etcd-cert"'
when:
- ansible_hostname=="k8s-master01"
- name: Modify the ".*etcd_key:.*" line
replace:
path: /root/calico-etcd.yaml
regexp: '(etcd_key:) ""'
replace: '\1 "/calico-secrets/etcd-key"'
when:
- ansible_hostname=="k8s-master01"
- name: Modify the ".*CALICO_IPV4POOL_CIDR.*" line
replace:
path: /root/calico-etcd.yaml
regexp: '# (- name: CALICO_IPV4POOL_CIDR)'
replace: '\1'
when:
- ansible_hostname=="k8s-master01"
- name: Modify the ".*192.168.0.0.*" line
replace:
path: /root/calico-etcd.yaml
regexp: '# (value:) "192.168.0.0/16"'
replace: ' \1 "{{ POD_SUBNET }}"'
when:
- ansible_hostname=="k8s-master01"
- name: Modify the "image:" line
replace:
path: /root/calico-etcd.yaml
regexp: '(.*image:) docker.io/calico(/.*)'
replace: '\1 {{ HARBOR_DOMAIN }}/google_containers\2'
when:
- ansible_hostname=="k8s-master01"
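Once the playbook in this section has been run, the rendered file on k8s-master01 can be spot-checked; the grep below only confirms that the placeholders were filled in (the base64 certificate data is deliberately not shown here):
[root@k8s-master01 ~]# grep -E 'etcd_(ca|cert|key)|CALICO_IPV4POOL_CIDR' /root/calico-etcd.yaml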
[root@ansible-server calico]# vim tasks/download_images.yml
- name: get calico version
shell:
chdir: /root
cmd: awk -F "/" '/image:/{print $NF}' calico-etcd.yaml
register: CALICO_VERSION
when:
- ansible_hostname=="k8s-master01"
- name: install CentOS or Rocky expect package
yum:
name: expect
when:
- (ansible_distribution=="CentOS" or ansible_distribution=="Rocky")
- ansible_hostname=="k8s-master01"
- name: install Ubuntu expect package
apt:
name: expect
force: yes
when:
- ansible_distribution=="Ubuntu"
- ansible_hostname=="k8s-master01"
- name: download calico image
shell:
cmd: |
{% for i in CALICO_VERSION.stdout_lines %}
ctr images pull --all-platforms registry.cn-beijing.aliyuncs.com/raymond9/{{ i }}
ctr images tag registry.cn-beijing.aliyuncs.com/raymond9/{{ i }} {{ HARBOR_DOMAIN }}/google_containers/{{ i }}
ctr images remove registry.cn-beijing.aliyuncs.com/raymond9/{{ i }}
expect <<EOF
spawn ctr images push --plain-http -u {{ USERNAME }} {{ HARBOR_DOMAIN }}/google_containers/{{ i }}
expect "Password:" { send "{{ PASSWORD }}\n";exp_continue }
EOF
{% endfor %}
when:
- ansible_hostname=="k8s-master01"
[root@ansible-server calico]# vim tasks/install_calico.yml
- name: install calico
shell:
chdir: /root
cmd: "kubectl --kubeconfig=/etc/kubernetes/admin.kubeconfig apply -f calico-etcd.yaml"
when:
- ansible_hostname=="k8s-master01"
[root@ansible-server calico]# vim tasks/main.yml
- include: calico_file.yml
- include: config.yml
- include: download_images.yml
- include: install_calico.yml
[root@ansible-server calico]# cd ../../
[root@ansible-server ansible]# tree roles/calico
roles/calico
├── tasks
│ ├── calico_file.yml
│ ├── config.yml
│ ├── download_images.yml
│ ├── install_calico.yml
│ └── main.yml
├── templates
│ └── calico-etcd.yaml.j2
└── vars
└── main.yml
3 directories, 7 files
[root@ansible-server ansible]# vim calico_role.yml
---
- hosts: master:etcd
roles:
- role: calico
[root@ansible-server ansible]# ansible-playbook calico_role.yml
14.2 Verify Calico
[root@k8s-master01 ~]# kubectl get pod -n kube-system |grep calico
calico-kube-controllers-786596c789-gkkfk 1/1 Running 0 77s
calico-node-4d5hz 1/1 Running 0 77s
calico-node-4v697 1/1 Running 0 77s
calico-node-g5nk9 1/1 Running 0 77s
calico-node-ljvlp 1/1 Running 0 77s
calico-node-vttcj 1/1 Running 0 77s
calico-node-wwhrl 1/1 Running 0 77s
[root@k8s-master01 ~]# kubectl get nodes
NAME STATUS ROLES AGE VERSION
k8s-master01.example.local Ready <none> 39m v1.23.6
k8s-master02.example.local Ready <none> 39m v1.23.6
k8s-master03.example.local Ready <none> 39m v1.23.6
k8s-node01.example.local Ready <none> 27m v1.23.6
k8s-node02.example.local Ready <none> 27m v1.23.6
k8s-node03.example.local Ready <none> 27m v1.23.6
15. Install CoreDNS
15.1 Install CoreDNS
[root@ansible-server ansible]# mkdir -p roles/coredns/{tasks,templates,vars}
[root@ansible-server ansible]# cd roles/coredns/
[root@ansible-server coredns]# ls
tasks templates vars
# Set CLUSTERDNS below to the tenth IP of your planned Service CIDR, and set HARBOR_DOMAIN to your own Harbor domain name
[root@ansible-server coredns]# vim vars/main.yml
CLUSTERDNS: 10.96.0.10
HARBOR_DOMAIN: harbor.raymonds.cc
USERNAME: admin
PASSWORD: 123456
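CLUSTERDNS is the tenth address of the Service CIDR (for 10.96.0.0/12 that is 10.96.0.10) and has to match the clusterDNS value the kubelets were configured with. A quick cross-check on any node, assuming the kubelet config path used earlier in this series (adjust if yours differs):
[root@k8s-master01 ~]# grep -A1 clusterDNS /etc/kubernetes/kubelet-conf.yml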
[root@ansible-server coredns]# cat templates/coredns.yaml.j2
apiVersion: v1
kind: ServiceAccount
metadata:
name: coredns
namespace: kube-system
---
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRole
metadata:
labels:
kubernetes.io/bootstrapping: rbac-defaults
name: system:coredns
rules:
- apiGroups:
- ""
resources:
- endpoints
- services
- pods
- namespaces
verbs:
- list
- watch
- apiGroups:
- discovery.k8s.io
resources:
- endpointslices
verbs:
- list
- watch
---
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRoleBinding
metadata:
annotations:
rbac.authorization.kubernetes.io/autoupdate: "true"
labels:
kubernetes.io/bootstrapping: rbac-defaults
name: system:coredns
roleRef:
apiGroup: rbac.authorization.k8s.io
kind: ClusterRole
name: system:coredns
subjects:
- kind: ServiceAccount
name: coredns
namespace: kube-system
---
apiVersion: v1
kind: ConfigMap
metadata:
name: coredns
namespace: kube-system
data:
Corefile: |
.:53 {
errors
health {
lameduck 5s
}
ready
kubernetes cluster.local in-addr.arpa ip6.arpa {
fallthrough in-addr.arpa ip6.arpa
}
prometheus :9153
forward . /etc/resolv.conf {
max_concurrent 1000
}
cache 30
loop
reload
loadbalance
}
---
apiVersion: apps/v1
kind: Deployment
metadata:
name: coredns
namespace: kube-system
labels:
k8s-app: kube-dns
kubernetes.io/name: "CoreDNS"
spec:
# replicas: not specified here:
# 1. Default is 1.
# 2. Will be tuned in real time if DNS horizontal auto-scaling is turned on.
strategy:
type: RollingUpdate
rollingUpdate:
maxUnavailable: 1
selector:
matchLabels:
k8s-app: kube-dns
template:
metadata:
labels:
k8s-app: kube-dns
spec:
priorityClassName: system-cluster-critical
serviceAccountName: coredns
tolerations:
- key: "CriticalAddonsOnly"
operator: "Exists"
nodeSelector:
kubernetes.io/os: linux
affinity:
podAntiAffinity:
requiredDuringSchedulingIgnoredDuringExecution:
- labelSelector:
matchExpressions:
- key: k8s-app
operator: In
values: ["kube-dns"]
topologyKey: kubernetes.io/hostname
containers:
- name: coredns
image: coredns/coredns:1.8.6
imagePullPolicy: IfNotPresent
resources:
limits:
memory: 170Mi
requests:
cpu: 100m
memory: 70Mi
args: [ "-conf", "/etc/coredns/Corefile" ]
volumeMounts:
- name: config-volume
mountPath: /etc/coredns
readOnly: true
ports:
- containerPort: 53
name: dns
protocol: UDP
- containerPort: 53
name: dns-tcp
protocol: TCP
- containerPort: 9153
name: metrics
protocol: TCP
securityContext:
allowPrivilegeEscalation: false
capabilities:
add:
- NET_BIND_SERVICE
drop:
- all
readOnlyRootFilesystem: true
livenessProbe:
httpGet:
path: /health
port: 8080
scheme: HTTP
initialDelaySeconds: 60
timeoutSeconds: 5
successThreshold: 1
failureThreshold: 5
readinessProbe:
httpGet:
path: /ready
port: 8181
scheme: HTTP
dnsPolicy: Default
volumes:
- name: config-volume
configMap:
name: coredns
items:
- key: Corefile
path: Corefile
---
apiVersion: v1
kind: Service
metadata:
name: kube-dns
namespace: kube-system
annotations:
prometheus.io/port: "9153"
prometheus.io/scrape: "true"
labels:
k8s-app: kube-dns
kubernetes.io/cluster-service: "true"
kubernetes.io/name: "CoreDNS"
spec:
selector:
k8s-app: kube-dns
clusterIP: 10.96.0.10
ports:
- name: dns
port: 53
protocol: UDP
- name: dns-tcp
port: 53
protocol: TCP
- name: metrics
port: 9153
protocol: TCP
[root@ansible-server coredns]# vim templates/coredns.yaml.j2
...
data:
Corefile: |
.:53 {
errors
health {
lameduck 5s
}
ready
kubernetes cluster.local in-addr.arpa ip6.arpa {
fallthrough in-addr.arpa ip6.arpa
}
prometheus :9153
forward . /etc/resolv.conf {
max_concurrent 1000
}
cache 30
loop ## delete this loop plugin line to avoid an internal forwarding loop
reload
loadbalance
}
...
spec:
selector:
k8s-app: kube-dns
clusterIP: {{ CLUSTERDNS }} # change this line
...
[root@ansible-server coredns]# vim tasks/coredns_file.yml
- name: copy coredns.yaml file
template:
src: coredns.yaml.j2
dest: /root/coredns.yaml
when:
- ansible_hostname=="k8s-master01"
[root@ansible-server coredns]# vim tasks/config.yml
- name: Modify the "image:" line
replace:
path: /root/coredns.yaml
regexp: '(.*image:) coredns(/.*)'
replace: '\1 {{ HARBOR_DOMAIN }}/google_containers\2'
[root@ansible-server coredns]# vim tasks/download_images.yml
- name: get coredns version
shell:
chdir: /root
cmd: awk -F "/" '/image:/{print $NF}' coredns.yaml
register: COREDNS_VERSION
- name: download coredns image
shell:
cmd: |
{% for i in COREDNS_VERSION.stdout_lines %}
ctr images pull --all-platforms registry.aliyuncs.com/google_containers/{{ i }}
ctr images tag registry.aliyuncs.com/google_containers/{{ i }} {{ HARBOR_DOMAIN }}/google_containers/{{ i }}
ctr images remove registry.aliyuncs.com/google_containers/{{ i }}
expect <<EOF
spawn ctr images push --plain-http -u {{ USERNAME }} {{ HARBOR_DOMAIN }}/google_containers/{{ i }}
expect "Password:" { send "{{ PASSWORD }}\n";exp_continue }
EOF
{% endfor %}
[root@ansible-server coredns]# vim tasks/install_coredns.yml
- name: install coredns
shell:
chdir: /root
cmd: "kubectl --kubeconfig=/etc/kubernetes/admin.kubeconfig apply -f coredns.yaml"
[root@ansible-server coredns]# vim tasks/main.yml
- include: coredns_file.yml
- include: config.yml
- include: download_images.yml
- include: install_coredns.yml
[root@ansible-server coredns]# cd ../../
[root@ansible-server ansible]# tree roles/coredns/
roles/coredns/
├── tasks
│ ├── config.yml
│ ├── coredns_file.yml
│ ├── download_images.yml
│ ├── install_coredns.yml
│ └── main.yml
├── templates
│ └── coredns.yaml.j2
└── vars
└── main.yml
3 directories, 7 files
[root@ansible-server ansible]# vim coredns_role.yml
---
- hosts: master01
roles:
- role: coredns
[root@ansible-server ansible]# ansible-playbook coredns_role.yml
15.2 Verify CoreDNS
[root@k8s-master01 ~]# kubectl get pod -n kube-system |grep coredns
coredns-66d5dc5c47-95267 1/1 Running 0 2m3s
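Beyond confirming the Pod is Running, a functional resolution test is worth doing; this sketch assumes the node can pull busybox:1.28 (substitute an image already mirrored in your Harbor if not):
[root@k8s-master01 ~]# kubectl run dns-test --rm -it --restart=Never --image=busybox:1.28 -- nslookup kubernetes.default.svc.cluster.local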
16. Install Metrics Server
16.1 Install metrics-server
[root@ansible-server ansible]# mkdir -p roles/metrics/{files,vars,tasks}
[root@ansible-server ansible]# cd roles/metrics/
[root@ansible-server metrics]# ls
files tasks vars
# Set HARBOR_DOMAIN below to your own Harbor domain name
[root@ansible-server metrics]# vim vars/main.yml
HARBOR_DOMAIN: harbor.raymonds.cc
USERNAME: admin
PASSWORD: 123456
[root@ansible-server metrics]# cat files/components.yaml
apiVersion: v1
kind: ServiceAccount
metadata:
labels:
k8s-app: metrics-server
name: metrics-server
namespace: kube-system
---
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRole
metadata:
labels:
k8s-app: metrics-server
rbac.authorization.k8s.io/aggregate-to-admin: "true"
rbac.authorization.k8s.io/aggregate-to-edit: "true"
rbac.authorization.k8s.io/aggregate-to-view: "true"
name: system:aggregated-metrics-reader
rules:
- apiGroups:
- metrics.k8s.io
resources:
- pods
- nodes
verbs:
- get
- list
- watch
---
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRole
metadata:
labels:
k8s-app: metrics-server
name: system:metrics-server
rules:
- apiGroups:
- ""
resources:
- pods
- nodes
- nodes/stats
- namespaces
- configmaps
verbs:
- get
- list
- watch
---
apiVersion: rbac.authorization.k8s.io/v1
kind: RoleBinding
metadata:
labels:
k8s-app: metrics-server
name: metrics-server-auth-reader
namespace: kube-system
roleRef:
apiGroup: rbac.authorization.k8s.io
kind: Role
name: extension-apiserver-authentication-reader
subjects:
- kind: ServiceAccount
name: metrics-server
namespace: kube-system
---
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRoleBinding
metadata:
labels:
k8s-app: metrics-server
name: metrics-server:system:auth-delegator
roleRef:
apiGroup: rbac.authorization.k8s.io
kind: ClusterRole
name: system:auth-delegator
subjects:
- kind: ServiceAccount
name: metrics-server
namespace: kube-system
---
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRoleBinding
metadata:
labels:
k8s-app: metrics-server
name: system:metrics-server
roleRef:
apiGroup: rbac.authorization.k8s.io
kind: ClusterRole
name: system:metrics-server
subjects:
- kind: ServiceAccount
name: metrics-server
namespace: kube-system
---
apiVersion: v1
kind: Service
metadata:
labels:
k8s-app: metrics-server
name: metrics-server
namespace: kube-system
spec:
ports:
- name: https
port: 443
protocol: TCP
targetPort: https
selector:
k8s-app: metrics-server
---
apiVersion: apps/v1
kind: Deployment
metadata:
labels:
k8s-app: metrics-server
name: metrics-server
namespace: kube-system
spec:
selector:
matchLabels:
k8s-app: metrics-server
strategy:
rollingUpdate:
maxUnavailable: 0
template:
metadata:
labels:
k8s-app: metrics-server
spec:
containers:
- args:
- --cert-dir=/tmp
- --secure-port=4443
- --kubelet-preferred-address-types=InternalIP,ExternalIP,Hostname
- --kubelet-use-node-status-port
- --metric-resolution=15s
image: k8s.gcr.io/metrics-server/metrics-server:v0.5.2
imagePullPolicy: IfNotPresent
livenessProbe:
failureThreshold: 3
httpGet:
path: /livez
port: https
scheme: HTTPS
periodSeconds: 10
name: metrics-server
ports:
- containerPort: 4443
name: https
protocol: TCP
readinessProbe:
failureThreshold: 3
httpGet:
path: /readyz
port: https
scheme: HTTPS
initialDelaySeconds: 20
periodSeconds: 10
resources:
requests:
cpu: 100m
memory: 200Mi
securityContext:
readOnlyRootFilesystem: true
runAsNonRoot: true
runAsUser: 1000
volumeMounts:
- mountPath: /tmp
name: tmp-dir
nodeSelector:
kubernetes.io/os: linux
priorityClassName: system-cluster-critical
serviceAccountName: metrics-server
volumes:
- emptyDir: {}
name: tmp-dir
---
apiVersion: apiregistration.k8s.io/v1
kind: APIService
metadata:
labels:
k8s-app: metrics-server
name: v1beta1.metrics.k8s.io
spec:
group: metrics.k8s.io
groupPriorityMinimum: 100
insecureSkipTLSVerify: true
service:
name: metrics-server
namespace: kube-system
version: v1beta1
versionPriority: 100
# Modify the following:
[root@ansible-server metrics]# vim files/components.yaml
...
spec:
containers:
- args:
- --cert-dir=/tmp
- --secure-port=4443
- --kubelet-preferred-address-types=InternalIP,ExternalIP,Hostname
- --kubelet-use-node-status-port
- --metric-resolution=15s
# Add the following:
- --kubelet-insecure-tls
- --requestheader-client-ca-file=/etc/kubernetes/pki/front-proxy-ca.pem
- --requestheader-username-headers=X-Remote-User
- --requestheader-group-headers=X-Remote-Group
- --requestheader-extra-headers-prefix=X-Remote-Extra-
...
volumeMounts:
- mountPath: /tmp
name: tmp-dir
# Add the following:
- name: ca-ssl
mountPath: /etc/kubernetes/pki
...
volumes:
- emptyDir: {}
name: tmp-dir
# Add the following:
- name: ca-ssl
hostPath:
path: /etc/kubernetes/pki
[root@ansible-server metrics]# vim tasks/metrics_file.yml
- name: copy components.yaml file
copy:
src: components.yaml
dest: /root/components.yaml
[root@ansible-server metrics]# vim tasks/config.yml
- name: Modify the "image:" line
replace:
path: /root/components.yaml
regexp: '(.*image:) k8s.gcr.io/metrics-server(/.*)'
replace: '\1 {{ HARBOR_DOMAIN }}/google_containers\2'
[root@ansible-server metrics]# vim tasks/download_images.yml
- name: get metrics version
shell:
chdir: /root
cmd: awk -F "/" '/image:/{print $NF}' components.yaml
register: METRICS_VERSION
- name: download metrics image
shell:
cmd: |
{% for i in METRICS_VERSION.stdout_lines %}
ctr images pull --all-platforms registry.aliyuncs.com/google_containers/{{ i }}
ctr images tag registry.aliyuncs.com/google_containers/{{ i }} {{ HARBOR_DOMAIN }}/google_containers/{{ i }}
ctr images remove registry.aliyuncs.com/google_containers/{{ i }}
expect <<EOF
spawn ctr images push --plain-http -u {{ USERNAME }} {{ HARBOR_DOMAIN }}/google_containers/{{ i }}
expect "Password:" { send "{{ PASSWORD }}\n";exp_continue }
EOF
{% endfor %}
[root@ansible-server metrics]# vim tasks/install_metrics.yml
- name: install metrics
shell:
chdir: /root
cmd: "kubectl --kubeconfig=/etc/kubernetes/admin.kubeconfig apply -f components.yaml"
[root@ansible-server metrics]# vim tasks/main.yml
- include: metrics_file.yml
- include: config.yml
- include: download_images.yml
- include: install_metrics.yml
[root@ansible-server metrics]# cd ../../
[root@ansible-server ansible]# tree roles/metrics/
roles/metrics/
├── files
│ └── components.yaml
├── tasks
│ ├── config.yml
│ ├── download_images.yml
│ ├── install_metrics.yml
│ ├── main.yml
│ └── metrics_file.yml
└── vars
└── main.yml
3 directories, 7 files
[root@ansible-server ansible]# vim metrics_role.yml
---
- hosts: master01
roles:
- role: metrics
[root@ansible-server ansible]# ansible-playbook metrics_role.yml
16.2 Verify metrics-server
[root@k8s-master01 ~]# kubectl get pod -n kube-system |grep metrics
metrics-server-6f6d4cd59-kbxw6 1/1 Running 0 74s
[root@k8s-master01 ~]# kubectl top node
NAME CPU(cores) CPU% MEMORY(bytes) MEMORY%
k8s-master01.example.local 101m 5% 1759Mi 48%
k8s-master02.example.local 83m 4% 1484Mi 41%
k8s-master03.example.local 119m 5% 1509Mi 41%
k8s-node01.example.local 68m 3% 771Mi 21%
k8s-node02.example.local 70m 3% 824Mi 22%
k8s-node03.example.local 74m 3% 818Mi 22%
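If kubectl top returns data, the aggregated metrics API is registered and healthy; this can also be checked directly (the APIService should report Available=True), and per-Pod metrics can be queried the same way:
[root@k8s-master01 ~]# kubectl get apiservice v1beta1.metrics.k8s.io
[root@k8s-master01 ~]# kubectl top pod -n kube-system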
17. Install Dashboard
17.1 Install Dashboard
[root@ansible-server ansible]# mkdir -p roles/dashboard/{tasks,vars,files,templates}
[root@ansible-server ansible]# cd roles/dashboard/
[root@ansible-server dashboard]# ls
files tasks templates vars
# Set HARBOR_DOMAIN below to your own Harbor domain name
[root@ansible-server dashboard]# vim vars/main.yml
HARBOR_DOMAIN: harbor.raymonds.cc
USERNAME: admin
PASSWORD: 123456
NODEPORT: 30005
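NODEPORT must fall within the kube-apiserver --service-node-port-range (30000-32767 by default). A quick check on a master node, assuming the unit-file locations of a typical binary installation (adjust the paths as needed):
[root@k8s-master01 ~]# grep -r "service-node-port-range" /etc/systemd/system/ /usr/lib/systemd/system/ 2>/dev/null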
[root@ansible-server dashboard]# cat templates/recommended.yaml.j2
# Copyright 2017 The Kubernetes Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
apiVersion: v1
kind: Namespace
metadata:
name: kubernetes-dashboard
---
apiVersion: v1
kind: ServiceAccount
metadata:
labels:
k8s-app: kubernetes-dashboard
name: kubernetes-dashboard
namespace: kubernetes-dashboard
---
kind: Service
apiVersion: v1
metadata:
labels:
k8s-app: kubernetes-dashboard
name: kubernetes-dashboard
namespace: kubernetes-dashboard
spec:
ports:
- port: 443
targetPort: 8443
selector:
k8s-app: kubernetes-dashboard
type: NodePort
---
apiVersion: v1
kind: Secret
metadata:
labels:
k8s-app: kubernetes-dashboard
name: kubernetes-dashboard-certs
namespace: kubernetes-dashboard
type: Opaque
---
apiVersion: v1
kind: Secret
metadata:
labels:
k8s-app: kubernetes-dashboard
name: kubernetes-dashboard-csrf
namespace: kubernetes-dashboard
type: Opaque
data:
csrf: ""
---
apiVersion: v1
kind: Secret
metadata:
labels:
k8s-app: kubernetes-dashboard
name: kubernetes-dashboard-key-holder
namespace: kubernetes-dashboard
type: Opaque
---
kind: ConfigMap
apiVersion: v1
metadata:
labels:
k8s-app: kubernetes-dashboard
name: kubernetes-dashboard-settings
namespace: kubernetes-dashboard
---
kind: Role
apiVersion: rbac.authorization.k8s.io/v1
metadata:
labels:
k8s-app: kubernetes-dashboard
name: kubernetes-dashboard
namespace: kubernetes-dashboard
rules:
# Allow Dashboard to get, update and delete Dashboard exclusive secrets.
- apiGroups: [""]
resources: ["secrets"]
resourceNames: ["kubernetes-dashboard-key-holder", "kubernetes-dashboard-certs", "kubernetes-dashboard-csrf"]
verbs: ["get", "update", "delete"]
# Allow Dashboard to get and update 'kubernetes-dashboard-settings' config map.
- apiGroups: [""]
resources: ["configmaps"]
resourceNames: ["kubernetes-dashboard-settings"]
verbs: ["get", "update"]
# Allow Dashboard to get metrics.
- apiGroups: [""]
resources: ["services"]
resourceNames: ["heapster", "dashboard-metrics-scraper"]
verbs: ["proxy"]
- apiGroups: [""]
resources: ["services/proxy"]
resourceNames: ["heapster", "http:heapster:", "https:heapster:", "dashboard-metrics-scraper", "http:dashboard-metrics-scraper"]
verbs: ["get"]
---
kind: ClusterRole
apiVersion: rbac.authorization.k8s.io/v1
metadata:
labels:
k8s-app: kubernetes-dashboard
name: kubernetes-dashboard
rules:
# Allow Metrics Scraper to get metrics from the Metrics server
- apiGroups: ["metrics.k8s.io"]
resources: ["pods", "nodes"]
verbs: ["get", "list", "watch"]
---
apiVersion: rbac.authorization.k8s.io/v1
kind: RoleBinding
metadata:
labels:
k8s-app: kubernetes-dashboard
name: kubernetes-dashboard
namespace: kubernetes-dashboard
roleRef:
apiGroup: rbac.authorization.k8s.io
kind: Role
name: kubernetes-dashboard
subjects:
- kind: ServiceAccount
name: kubernetes-dashboard
namespace: kubernetes-dashboard
---
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRoleBinding
metadata:
name: kubernetes-dashboard
roleRef:
apiGroup: rbac.authorization.k8s.io
kind: ClusterRole
name: kubernetes-dashboard
subjects:
- kind: ServiceAccount
name: kubernetes-dashboard
namespace: kubernetes-dashboard
---
kind: Deployment
apiVersion: apps/v1
metadata:
labels:
k8s-app: kubernetes-dashboard
name: kubernetes-dashboard
namespace: kubernetes-dashboard
spec:
replicas: 1
revisionHistoryLimit: 10
selector:
matchLabels:
k8s-app: kubernetes-dashboard
template:
metadata:
labels:
k8s-app: kubernetes-dashboard
spec:
containers:
- name: kubernetes-dashboard
image: kubernetesui/dashboard:v2.4.0
imagePullPolicy: Always
ports:
- containerPort: 8443
protocol: TCP
args:
- --auto-generate-certificates
- --namespace=kubernetes-dashboard
# Uncomment the following line to manually specify Kubernetes API server Host
# If not specified, Dashboard will attempt to auto discover the API server and connect
# to it. Uncomment only if the default does not work.
# - --apiserver-host=http://my-address:port
volumeMounts:
- name: kubernetes-dashboard-certs
mountPath: /certs
# Create on-disk volume to store exec logs
- mountPath: /tmp
name: tmp-volume
livenessProbe:
httpGet:
scheme: HTTPS
path: /
port: 8443
initialDelaySeconds: 30
timeoutSeconds: 30
securityContext:
allowPrivilegeEscalation: false
readOnlyRootFilesystem: true
runAsUser: 1001
runAsGroup: 2001
volumes:
- name: kubernetes-dashboard-certs
secret:
secretName: kubernetes-dashboard-certs
- name: tmp-volume
emptyDir: {}
serviceAccountName: kubernetes-dashboard
nodeSelector:
"kubernetes.io/os": linux
# Comment the following tolerations if Dashboard must not be deployed on master
tolerations:
- key: node-role.kubernetes.io/master
effect: NoSchedule
---
kind: Service
apiVersion: v1
metadata:
labels:
k8s-app: dashboard-metrics-scraper
name: dashboard-metrics-scraper
namespace: kubernetes-dashboard
spec:
ports:
- port: 8000
targetPort: 8000
selector:
k8s-app: dashboard-metrics-scraper
---
kind: Deployment
apiVersion: apps/v1
metadata:
labels:
k8s-app: dashboard-metrics-scraper
name: dashboard-metrics-scraper
namespace: kubernetes-dashboard
spec:
replicas: 1
revisionHistoryLimit: 10
selector:
matchLabels:
k8s-app: dashboard-metrics-scraper
template:
metadata:
labels:
k8s-app: dashboard-metrics-scraper
spec:
securityContext:
seccompProfile:
type: RuntimeDefault
containers:
- name: dashboard-metrics-scraper
image: kubernetesui/metrics-scraper:v1.0.7
ports:
- containerPort: 8000
protocol: TCP
livenessProbe:
httpGet:
scheme: HTTP
path: /
port: 8000
initialDelaySeconds: 30
timeoutSeconds: 30
volumeMounts:
- mountPath: /tmp
name: tmp-volume
securityContext:
allowPrivilegeEscalation: false
readOnlyRootFilesystem: true
runAsUser: 1001
runAsGroup: 2001
serviceAccountName: kubernetes-dashboard
nodeSelector:
"kubernetes.io/os": linux
# Comment the following tolerations if Dashboard must not be deployed on master
tolerations:
- key: node-role.kubernetes.io/master
effect: NoSchedule
volumes:
- name: tmp-volume
emptyDir: {}
[root@ansible-server dashboard]# vim templates/recommended.yaml.j2
...
kind: Service
apiVersion: v1
metadata:
labels:
k8s-app: kubernetes-dashboard
name: kubernetes-dashboard
namespace: kubernetes-dashboard
spec:
type: NodePort # add this line
ports:
- port: 443
targetPort: 8443
nodePort: {{ NODEPORT }} # add this line
selector:
k8s-app: kubernetes-dashboard
...
[root@ansible-server dashboard]# vim files/admin.yaml
apiVersion: v1
kind: ServiceAccount
metadata:
name: admin-user
namespace: kube-system
---
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRoleBinding
metadata:
name: admin-user
annotations:
rbac.authorization.kubernetes.io/autoupdate: "true"
roleRef:
apiGroup: rbac.authorization.k8s.io
kind: ClusterRole
name: cluster-admin
subjects:
- kind: ServiceAccount
name: admin-user
namespace: kube-system
[root@ansible-server dashboard]# vim tasks/dashboard_file.yml
- name: copy recommended.yaml file
template:
src: recommended.yaml.j2
dest: /root/recommended.yaml
- name: copy admin.yaml file
copy:
src: admin.yaml
dest: /root/admin.yaml
[root@ansible-server dashboard]# vim tasks/config.yml
- name: Modify the "image:" line
replace:
path: /root/recommended.yaml
regexp: '(.*image:) kubernetesui(/.*)'
replace: '\1 {{ HARBOR_DOMAIN }}/google_containers\2'
[root@ansible-server dashboard]# vim tasks/download_images.yml
- name: get dashboard version
shell:
chdir: /root
cmd: awk -F "/" '/image:/{print $NF}' recommended.yaml
register: DASHBOARD_VERSION
- name: download dashboard image
shell:
cmd: |
{% for i in DASHBOARD_VERSION.stdout_lines %}
ctr images pull --all-platforms registry.aliyuncs.com/google_containers/{{ i }}
ctr images tag registry.aliyuncs.com/google_containers/{{ i }} {{ HARBOR_DOMAIN }}/google_containers/{{ i }}
ctr images remove registry.aliyuncs.com/google_containers/{{ i }}
expect <<EOF
spawn ctr images push --plain-http -u {{ USERNAME }} {{ HARBOR_DOMAIN }}/google_containers/{{ i }}
expect "Password:" { send "{{ PASSWORD }}\n";exp_continue }
EOF
{% endfor %}
[root@ansible-server dashboard]# vim tasks/install_dashboard.yml
- name: install dashboard
shell:
chdir: /root
cmd: "kubectl --kubeconfig=/etc/kubernetes/admin.kubeconfig apply -f recommended.yaml -f admin.yaml"
[root@ansible-server dashboard]# vim tasks/main.yml
- include: dashboard_file.yml
- include: config.yml
- include: download_images.yml
- include: install_dashboard.yml
[root@ansible-server dashboard]# cd ../../
[root@ansible-server ansible]# tree roles/dashboard/
roles/dashboard/
├── files
│ └── admin.yaml
├── tasks
│ ├── config.yml
│ ├── dashboard_file.yml
│ ├── download_images.yml
│ ├── install_dashboard.yml
│ └── main.yml
├── templates
│ └── recommended.yaml.j2
└── vars
└── main.yml
4 directories, 8 files
[root@ansible-server ansible]# vim dashboard_role.yml
---
- hosts: master01
roles:
- role: dashboard
[root@ansible-server ansible]# ansible-playbook dashboard_role.yml
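Before opening the browser, it may help to confirm that the Service really exposes the expected NodePort:
[root@k8s-master01 ~]# kubectl get svc -n kubernetes-dashboard kubernetes-dashboard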
17.2 Log in to the Dashboard
https://172.31.3.101:30005
View the token value:
[root@k8s-master01 ~]# kubectl -n kube-system describe secret $(kubectl -n kube-system get secret | grep admin-user | awk '{print $1}')
Name: admin-user-token-vh65j
Namespace: kube-system
Labels: <none>
Annotations: kubernetes.io/service-account.name: admin-user
kubernetes.io/service-account.uid: 02afbef9-872f-4ab6-9741-a94e19df2d54
Type: kubernetes.io/service-account-token
Data
====
ca.crt: 1411 bytes
namespace: 11 bytes
token: eyJhbGciOiJSUzI1NiIsImtpZCI6IkJTRk9vTmt0OFhYWmhPaWkzbklvRUtWd1psU2xWbzlzRWhWY0hnNEVLYm8ifQ.eyJpc3MiOiJrdWJlcm5ldGVzL3NlcnZpY2VhY2NvdW50Iiwia3ViZXJuZXRlcy5pby9zZXJ2aWNlYWNjb3VudC9uYW1lc3BhY2UiOiJrdWJlLXN5c3RlbSIsImt1YmVybmV0ZXMuaW8vc2VydmljZWFjY291bnQvc2VjcmV0Lm5hbWUiOiJhZG1pbi11c2VyLXRva2VuLXZoNjVqIiwia3ViZXJuZXRlcy5pby9zZXJ2aWNlYWNjb3VudC9zZXJ2aWNlLWFjY291bnQubmFtZSI6ImFkbWluLXVzZXIiLCJrdWJlcm5ldGVzLmlvL3NlcnZpY2VhY2NvdW50L3NlcnZpY2UtYWNjb3VudC51aWQiOiIwMmFmYmVmOS04NzJmLTRhYjYtOTc0MS1hOTRlMTlkZjJkNTQiLCJzdWIiOiJzeXN0ZW06c2VydmljZWFjY291bnQ6a3ViZS1zeXN0ZW06YWRtaW4tdXNlciJ9.NnZ7gsK4XR7V_5OWVMeDtVjit9_SzZaV3AUYnIF8sJzKM7-WjnC6d4P9WwlbG9Tjx_tSVt4f0er9zznyQjX36tPsJ4fI6pnjMkgotddnR_PpIpnsgtMzhXtLwTjX35RrTuw4MoFg_rjnCUOs5xl5JHXPbYfwT0yVBglV4a62xnPcRkwowSGUwTlFbhqUebFLGXHIVCphHmCAWDke5u4nyTA5RvyYLC_bmbRAw8KxJNNseu3g7JQkHYQ5-aA1pD8RJuvAhE5StCbuYqRZNWK36qSE_1bE6lpl6feaEM9-8ZI3_FNi7_gtFaVItQFO-BchoXsfYYfAGCRGPEdhAqxxCQ
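An alternative to copying the token out of the describe output is to print it directly. This one-liner relies on the ServiceAccount token Secret that Kubernetes 1.23 still auto-creates (this behavior changes in 1.24):
[root@k8s-master01 ~]# kubectl -n kube-system get secret $(kubectl -n kube-system get sa admin-user -o jsonpath='{.secrets[0].name}') -o go-template='{{.data.token | base64decode}}'; echo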