K8s Basic Application Deployment (Part 7)


Deploying kube-dns

1. Install DNS

On the deployment node (master101):

Create the working directory:

mkdir -p /etc/ansible/manifests/dns/kube-dns

Upload the software packages to this directory:

# cd /etc/ansible/manifests/dns/kube-dns

# ll
total 136996
drwxr-xr-x 3 root root     4096 Mar 10 09:23 ./
drwxr-xr-x 3 root root     4096 Mar 10 09:21 ../
-rw-r--r-- 1 root root  3983872 Mar 10 09:23 busybox-online.tar.gz
-rw-r--r-- 1 root root      277 Mar 10 09:23 busybox.yaml
drwxr-xr-x 2 root root     4096 Mar 10 09:23 heapster/
-rw-r--r-- 1 root root 41687040 Mar 10 09:23 k8s-dns-dnsmasq-nanny-amd64_1.14.13.tar.gz
-rw-r--r-- 1 root root 51441152 Mar 10 09:23 k8s-dns-kube-dns-amd64_1.14.13.tar.gz
-rw-r--r-- 1 root root 43140608 Mar 10 09:23 k8s-dns-sidecar-amd64_1.14.13.tar.gz
-rw-r--r-- 1 root root     6305 Mar 10 09:23 kube-dns.yaml

When tagging images, take care not to tag the wrong image:

# Image 1
docker load -i k8s-dns-kube-dns-amd64_1.14.13.tar.gz

docker tag gcr.io/google-containers/k8s-dns-kube-dns-amd64:1.14.13 harbor.123.com/baseimages/k8s-dns-kube-dns-amd64:1.14.13

docker push harbor.123.com/baseimages/k8s-dns-kube-dns-amd64:1.14.13

# Image 2
docker load -i k8s-dns-dnsmasq-nanny-amd64_1.14.13.tar.gz

docker tag gcr.io/google-containers/k8s-dns-dnsmasq-nanny-amd64:1.14.13 harbor.123.com/baseimages/k8s-dns-dnsmasq-nanny-amd64:1.14.13

docker push harbor.123.com/baseimages/k8s-dns-dnsmasq-nanny-amd64:1.14.13

# Image 3
docker load -i k8s-dns-sidecar-amd64_1.14.13.tar.gz

docker tag gcr.io/google-containers/k8s-dns-sidecar-amd64:1.14.13 harbor.123.com/baseimages/k8s-dns-sidecar-amd64:1.14.13

docker push harbor.123.com/baseimages/k8s-dns-sidecar-amd64:1.14.13
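Because mis-tagging is the most common slip here, a small shell loop (a sketch, assuming the filenames and Harbor project shown above) loads, tags, and pushes all three images with one consistent naming rule:

#!/bin/bash
# Load, retag, and push the three kube-dns images in one pass.
VER=1.14.13
for name in k8s-dns-kube-dns-amd64 k8s-dns-dnsmasq-nanny-amd64 k8s-dns-sidecar-amd64; do
  docker load -i ${name}_${VER}.tar.gz
  docker tag  gcr.io/google-containers/${name}:${VER} harbor.123.com/baseimages/${name}:${VER}
  docker push harbor.123.com/baseimages/${name}:${VER}
done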

The kube-dns pod runs three containers:

- kubedns: resolves service names inside the cluster
- dnsmasq: provides a DNS cache, which lowers the load on kubedns and improves performance
- sidecar: periodically checks the health of kubedns and dnsmasq
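For orientation, an abridged sketch of the containers section of the kube-dns deployment (assuming the stock 1.14.13 manifest, with the images repointed at Harbor as above):

      containers:
      - name: kubedns
        image: harbor.123.com/baseimages/k8s-dns-kube-dns-amd64:1.14.13
      - name: dnsmasq
        image: harbor.123.com/baseimages/k8s-dns-dnsmasq-nanny-amd64:1.14.13
      - name: sidecar
        image: harbor.123.com/baseimages/k8s-dns-sidecar-amd64:1.14.13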

vim kube-dns.yaml

Changes made to kube-dns.yaml (numbers are line numbers within that file): modified lines 33, 102, and 153; copied line 176 and inserted the copies as lines 174 and 175; also edited lines 195, 208, and 209.
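The exact line numbers vary with the manifest version; in substance the edits amount to three things (a sketch, with values taken from the outputs below): point the three image: lines at the Harbor registry, set the cluster domain, and pin the Service clusterIP:

# Service section
  clusterIP: 10.20.254.254
# kubedns container args
  - --domain=linux01.local.
# dnsmasq container args
  - --server=/linux01.local/127.0.0.1#10053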

Apply the manifest:

# kubectl apply -f kube-dns.yaml
service/kube-dns created
serviceaccount/kube-dns created
configmap/kube-dns created
deployment.extensions/kube-dns created

# kubectl get pod -n kube-system
NAME                                      READY   STATUS    RESTARTS   AGE
calico-kube-controllers-cd9cdc588-54lnd   1/1     Running   1          9h
calico-node-hpf25                         1/1     Running   1          8h
calico-node-pdplg                         1/1     Running   1          9h
calico-node-qmm9p                         1/1     Running   1          9h
calico-node-rxj65                         1/1     Running   1          8h
kube-dns-7657d6b74c-nnzdl                 3/3     Running   0          61s    <--

Import busybox (a small toolbox image) for testing:

docker load -i busybox-online.tar.gz

docker tag quay.io/prometheus/busybox:latest harbor.123.com/baseimages/busybox:latest

docker push harbor.123.com/baseimages/busybox:latest

Write the YAML file. YAML indentation is strict, so watch the leading spaces and alignment:

vim busybox.yaml

apiVersion: v1
kind: Pod
metadata:
  name: busybox
  namespace: default  # test DNS from the default namespace
spec:
  containers:
  - image: harbor.123.com/baseimages/busybox:latest
    command:
      - sleep
      - "3600"
    imagePullPolicy: Always
    name: busybox
  restartPolicy: Always
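
Before applying, a dry run can catch indentation mistakes early (on newer kubectl versions the flag is --dry-run=client):

kubectl apply -f busybox.yaml --dry-run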

Create the pod:

# kubectl apply -f busybox.yaml 
pod/busybox created
# kubectl get pod
NAME                       READY   STATUS    RESTARTS   AGE
busybox                    1/1     Running   0          48m
net-test-cd766cb69-24lgk   1/1     Running   1          9h
net-test-cd766cb69-46dnb   1/1     Running   1          9h
net-test-cd766cb69-mwwlt   1/1     Running   1          9h
net-test-cd766cb69-snv44   1/1     Running   1          9h
# kubectl exec -it busybox sh
/ # ifconfig
/ # exit
# kubectl get service --all-namespaces
NAMESPACE     NAME         TYPE        CLUSTER-IP      EXTERNAL-IP   PORT(S)         AGE
default       kubernetes   ClusterIP   10.20.0.1       <none>        443/TCP         11h
kube-system   kube-dns     ClusterIP   10.20.254.254   <none>        53/UDP,53/TCP   70m

# Resolve service domain names to IP addresses
# kubectl exec busybox nslookup kube-dns.kube-system.svc.linux01.local
Server:    10.20.254.254
Address 1: 10.20.254.254 kube-dns.kube-system.svc.linux01.local

Name:      kube-dns.kube-system.svc.linux01.local
Address 1: 10.20.254.254 kube-dns.kube-system.svc.linux01.local


# kubectl exec busybox nslookup kubernetes.default.svc.linux01.local
Server:    10.20.254.254
Address 1: 10.20.254.254 kube-dns.kube-system.svc.linux01.local

Name:      kubernetes.default.svc.linux01.local
Address 1: 10.20.0.1 kubernetes.default.svc.linux01.local
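
Both lookups follow the Kubernetes service DNS convention <service>.<namespace>.svc.<cluster-domain>; the cluster domain linux01.local was set in kube-dns.yaml above. Any other Service resolves the same way, for example:

# kubectl exec busybox nslookup <service-name>.<namespace>.svc.linux01.local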

Deploying kubernetes-dashboard

Upload the package kubernetes-dashboard-amd64-v1.10.1.tar.gz (already extracted):

cd /etc/ansible/manifests/dashboard/
mkdir 1.10.1
cd 1.10.1/

# ll
total 119496
drwxr-xr-x 2 root root      4096 Mar 10 11:54 ./
drwxr-xr-x 4 root root      4096 Mar 10 11:32 ../
-rw-r--r-- 1 root root       357 Oct  5  2019 admin-user-sa-rbac.yaml
-rw-r--r-- 1 root root 122320384 Oct  5  2019 kubernetes-dashboard-amd64-v1.10.1.tar.gz
-rw-r--r-- 1 root root      4766 Oct  5  2019 kubernetes-dashboard.yaml
-rw-r--r-- 1 root root      2223 Oct  5  2019 read-user-sa-rbac.yaml
-rw-r--r-- 1 root root       458 Oct  5  2019 ui-admin-rbac.yaml
-rw-r--r-- 1 root root       477 Oct  5  2019 ui-read-rbac.yaml
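
Of these files, admin-user-sa-rbac.yaml creates the ServiceAccount used to log in to the dashboard with full rights (the read-user files are the read-only counterpart). A sketch of its typical content, assuming the conventional admin-user manifest:

apiVersion: v1
kind: ServiceAccount
metadata:
  name: admin-user
  namespace: kube-system
---
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRoleBinding
metadata:
  name: admin-user
roleRef:
  apiGroup: rbac.authorization.k8s.io
  kind: ClusterRole
  name: cluster-admin
subjects:
- kind: ServiceAccount
  name: admin-user
  namespace: kube-system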

Again, take care not to tag the wrong image:

docker load -i kubernetes-dashboard-amd64-v1.10.1.tar.gz

docker tag gcr.io/google-containers/kubernetes-dashboard-amd64:v1.10.1 harbor.123.com/baseimages/kubernetes-dashboard-amd64:v1.10.1

docker push harbor.123.com/baseimages/kubernetes-dashboard-amd64:v1.10.1

Modify the configuration:

vim kubernetes-dashboard.yaml

        image: harbor.123.com/baseimages/kubernetes-dashboard-amd64:v1.10.1

Apply all '.yaml' files in the current directory:

kubectl apply -f .

Verify that kubernetes-dashboard is running:

# kubectl get pod -n kube-system
NAME                                      READY   STATUS    RESTARTS   AGE
calico-kube-controllers-cd9cdc588-54lnd   1/1     Running   1          11h
calico-node-hpf25                         1/1     Running   1          10h
calico-node-pdplg                         1/1     Running   2          11h
calico-node-qmm9p                         1/1     Running   1          11h
calico-node-rxj65                         1/1     Running   1          10h
kube-dns-7657d6b74c-nnzdl                 3/3     Running   0          120m
kubernetes-dashboard-5f46dbb585-xpdgl     1/1     Running   0          2m33s

Check the current cluster status:

kubectl cluster-info
Kubernetes master is running at https://192.168.37.240:6443
KubeDNS is running at https://192.168.37.240:6443/api/v1/namespaces/kube-system/services/kube-dns:dns/proxy
kubernetes-dashboard is running at https://192.168.37.240:6443/api/v1/namespaces/kube-system/services/https:kubernetes-dashboard:/proxy

To further debug and diagnose cluster problems, use 'kubectl cluster-info dump'.

Open 'kubernetes-dashboard' in a browser at the URL shown by cluster-info above.
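
The dashboard Service is exposed as a NodePort (443:59217 in the service listing further below), so it can also be reached directly on any node, for example:

https://192.168.37.110:59217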


To log in, use the token of the admin-user ServiceAccount. List the secrets to find it:

# kubectl -n kube-system get secret
NAME                                  TYPE                                  DATA   AGE
admin-user-token-7qzql                kubernetes.io/service-account-token   3      11m    <--
calico-etcd-secrets                   Opaque                                3      11h
calico-kube-controllers-token-srsbv   kubernetes.io/service-account-token   3      11h
calico-node-token-9qg5t               kubernetes.io/service-account-token   3      11h
dashboard-read-user-token-fv977       kubernetes.io/service-account-token   3      11m
default-token-kjqhw                   kubernetes.io/service-account-token   3      11h
kube-dns-token-vntv9                  kubernetes.io/service-account-token   3      130m
kubernetes-dashboard-certs            Opaque                                0      12m
kubernetes-dashboard-key-holder       Opaque                                2      12m
kubernetes-dashboard-token-95fqn      kubernetes.io/service-account-token   3      12m


# kubectl -n kube-system describe secret admin-user-token-7qzql |grep token
Name:         admin-user-token-7qzql
Type:  kubernetes.io/service-account-token
token:      eyJhbGciOiJSUzI1NiIsImtpZCI6IiJ9.eyJpc3MiOiJrdWJlcm5ldGVzL3NlcnZpY2VhY2NvdW50Iiwia3ViZXJuZXRlcy5pby9zZXJ2aWNlYWNjb3VudC9uYW1lc3BhY2UiOiJrdWJlLXN5c3RlbSIsImt1YmVybmV0ZXMuaW8vc2VydmljZWFjY291bnQvc2VjcmV0Lm5hbWUiOiJhZG1pbi11c2VyLXRva2VuLTdxenFsIiwia3ViZXJuZXRlcy5pby9zZXJ2aWNlYWNjb3VudC9zZXJ2aWNlLWFjY291bnQubmFtZSI6ImFkbWluLXVzZXIiLCJrdWJlcm5ldGVzLmlvL3NlcnZpY2VhY2NvdW50L3NlcnZpY2UtYWNjb3VudC51aWQiOiJmY2I4NzcxNy1iZWY4LTExZWQtYTU4Yy0wMDBjMjllOTA3N2QiLCJzdWIiOiJzeXN0ZW06c2VydmljZWFjY291bnQ6a3ViZS1zeXN0ZW06YWRtaW4tdXNlciJ9.akRoxi5Kk4abXg3bPZe68aaOmRsWuuW_q-Ak080ZjH3h4mGHTlEr1sez9526L-9wVJVN7rhpMoDnn062mcmUEiEZPdzF2BFYTqu5sb-_MuX_vQx-ZCmwh-RBRF2v54YgpaohFVLGaahcgqYOpGGCqG1MhNhzevbG_1PyYxFIqlUSQURHTQRsAGrXVqemDSln-OFP1LOKjXMQV-jLAntdpGVCn2RVXNf2vbWT7gwwwuLdm-uIm0QbRTLw9zjxSbEzNuJS6gOFEP2QaMCzvLNZeA-Tm5m6kGYyhz2TQurKKdNVA_8rnwg0o0q4aD0H3Dc4AV38dMFlxoQ1M2goGLl3Rw
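
As a convenience (not part of the original steps), the token can be pulled out in one line, assuming the secret name still contains admin-user:

kubectl -n kube-system describe secret $(kubectl -n kube-system get secret | awk '/admin-user/{print $1}') | awk '/^token:/{print $2}'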


Deploying heapster

Tag and push the heapster image:

cd /etc/ansible/manifests/dns/kube-dns/heapster

docker load -i heapster-amd64_v1.5.1.tar

docker tag gcr.io/google-containers/heapster-amd64:v1.5.1 harbor.123.com/baseimages/heapster-amd64:v1.5.1

docker push harbor.123.com/baseimages/heapster-amd64:v1.5.1

# Modify this line in the config file
vim heapster.yaml
...
        image: harbor.123.com/baseimages/heapster-amd64:v1.5.1

Web UI (grafana):

docker load -i heapster-grafana-amd64-v4.4.3.tar

docker tag 8cb3de219af7 harbor.123.com/baseimages/heapster-grafana-amd64:v4.4.3   # tag by image ID; use the ID reported by docker load

docker push harbor.123.com/baseimages/heapster-grafana-amd64:v4.4.3

# Web UI
vim grafana.yaml
...
        image: harbor.123.com/baseimages/heapster-grafana-amd64:v4.4.3

Data storage (influxdb):

docker load -i heapster-influxdb-amd64_v1.3.3.tar

docker tag gcr.io/google-containers/heapster-influxdb-amd64:v1.3.3 harbor.123.com/baseimages/heapster-influxdb-amd64:v1.3.3

docker push harbor.123.com/baseimages/heapster-influxdb-amd64:v1.3.3

# Data storage
vim influxdb.yaml
...
        image: harbor.123.com/baseimages/heapster-influxdb-amd64:v1.3.3 
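
The three components form a pipeline: heapster scrapes metrics from each node's kubelet, writes them into influxdb, and grafana reads from influxdb for its dashboards. In the stock manifests this wiring is done through heapster's source and sink arguments (a sketch; the exact form in heapster.yaml may differ):

# heapster.yaml, container command section
- --source=kubernetes:https://kubernetes.default
- --sink=influxdb:http://monitoring-influxdb.kube-system.svc:8086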

Apply all '.yaml' files in the current directory:

kubectl apply -f .
# kubectl get pod -n kube-system
NAME                                      READY   STATUS    RESTARTS   AGE
calico-kube-controllers-cd9cdc588-54lnd   1/1     Running   1          12h
calico-node-hpf25                         1/1     Running   1          11h
calico-node-pdplg                         1/1     Running   2          12h
calico-node-qmm9p                         1/1     Running   1          12h
calico-node-rxj65                         1/1     Running   1          12h
heapster-86749b8d5f-b88cc                 1/1     Running   0          21s
kube-dns-7657d6b74c-nnzdl                 3/3     Running   0          3h9m
kubernetes-dashboard-5f46dbb585-xpdgl     1/1     Running   0          71m
monitoring-grafana-b689448c4-qgbnx        1/1     Running   0          21s
monitoring-influxdb-8697d694c7-26d2n      1/1     Running   0          21s
# kubectl get pod -n kube-system -o wide
NAME                                      READY   STATUS    RESTARTS   AGE     IP               NODE             NOMINATED NODE   READINESS GATES
calico-kube-controllers-cd9cdc588-54lnd   1/1     Running   1          12h     192.168.37.111   192.168.37.111   <none>           <none>
calico-node-hpf25                         1/1     Running   1          11h     192.168.37.110   192.168.37.110   <none>           <none>
calico-node-pdplg                         1/1     Running   2          12h     192.168.37.101   192.168.37.101   <none>           <none>
calico-node-qmm9p                         1/1     Running   1          12h     192.168.37.111   192.168.37.111   <none>           <none>
calico-node-rxj65                         1/1     Running   1          12h     192.168.37.102   192.168.37.102   <none>           <none>
heapster-86749b8d5f-b88cc                 1/1     Running   0          73s     172.20.166.134   192.168.37.110   <none>           <none>
kube-dns-7657d6b74c-nnzdl                 3/3     Running   0          3h10m   172.20.166.133   192.168.37.110   <none>           <none>
kubernetes-dashboard-5f46dbb585-xpdgl     1/1     Running   0          72m     172.20.104.6     192.168.37.111   <none>           <none>
monitoring-grafana-b689448c4-qgbnx        1/1     Running   0          73s     172.20.104.7     192.168.37.111   <none>           <none>
monitoring-influxdb-8697d694c7-26d2n      1/1     Running   0          73s     172.20.104.8     192.168.37.111   <none>           <none>

View the monitoring-grafana UI:

kubectl cluster-info
Kubernetes master is running at https://192.168.37.240:6443
KubeDNS is running at https://192.168.37.240:6443/api/v1/namespaces/kube-system/services/kube-dns:dns/proxy
kubernetes-dashboard is running at https://192.168.37.240:6443/api/v1/namespaces/kube-system/services/https:kubernetes-dashboard:/proxy
monitoring-grafana is running at https://192.168.37.240:6443/api/v1/namespaces/kube-system/services/monitoring-grafana/proxy
monitoring-influxdb is running at https://192.168.37.240:6443/api/v1/namespaces/kube-system/services/monitoring-influxdb/proxy

To further debug and diagnose cluster problems, use 'kubectl cluster-info dump'.
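
If the apiserver proxy URL above prompts for a client certificate in the browser, kubectl proxy (a convenience, not part of the original steps) exposes the same path on localhost without one:

kubectl proxy --port=8001
# then browse http://127.0.0.1:8001/api/v1/namespaces/kube-system/services/monitoring-grafana/proxy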

Grafana official site: https://grafana.com

# kubectl get service
NAME         TYPE        CLUSTER-IP   EXTERNAL-IP   PORT(S)   AGE
kubernetes   ClusterIP   10.20.0.1    <none>        443/TCP   13h


# kubectl get service --all-namespaces
NAMESPACE     NAME                   TYPE        CLUSTER-IP      EXTERNAL-IP   PORT(S)         AGE
default       kubernetes             ClusterIP   10.20.0.1       <none>        443/TCP         13h
kube-system   heapster               ClusterIP   10.20.129.183   <none>        80/TCP          21m
kube-system   kube-dns               ClusterIP   10.20.254.254   <none>        53/UDP,53/TCP   3h29m
kube-system   kubernetes-dashboard   NodePort    10.20.0.80      <none>        443:59217/TCP   92m
kube-system   monitoring-grafana     ClusterIP   10.20.244.227   <none>        80/TCP          21m
kube-system   monitoring-influxdb    ClusterIP   10.20.159.9     <none>        8086/TCP        21m

# Delete the heapster service
# kubectl delete service heapster -n kube-system
service "heapster" deleted
# Confirm heapster is gone from the list
# kubectl get service --all-namespaces
NAMESPACE     NAME                   TYPE        CLUSTER-IP      EXTERNAL-IP   PORT(S)         AGE
default       kubernetes             ClusterIP   10.20.0.1       <none>        443/TCP         13h
kube-system   kube-dns               ClusterIP   10.20.254.254   <none>        53/UDP,53/TCP   3h37m
kube-system   kubernetes-dashboard   NodePort    10.20.0.80      <none>        443:59217/TCP   99m
kube-system   monitoring-grafana     ClusterIP   10.20.244.227   <none>        80/TCP          28m
kube-system   monitoring-influxdb    ClusterIP   10.20.159.9     <none>        8086/TCP        28m

# Recreate it
# kubectl apply -f heapster.yaml

# Note that the ClusterIP has changed
# kubectl get service --all-namespaces |grep heapster
NAMESPACE     NAME                   TYPE        CLUSTER-IP      EXTERNAL-IP   PORT(S)         AGE
kube-system   heapster               ClusterIP   10.20.120.29    <none>        80/TCP          4s
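
The changed ClusterIP shows why in-cluster clients should address Services by DNS name rather than by IP: the name stays stable across delete and recreate. A quick check from the busybox pod, assuming the linux01.local cluster domain set earlier:

# kubectl exec busybox nslookup heapster.kube-system.svc.linux01.local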