- docker
- harbor
- 03 K8S 安装准备
- 04 K8s 部署
- 05 k8s deployment+service
一、kubernetes/dashboard[master]
[root@VM-16-14-centos data]# kubectl apply -f https://raw.githubusercontent.com/kubernetes/dashboard/v2.4.0/aio/deploy/recommended.yaml
[root@VM-16-14-centos data]# kubectl get pods -o wide --all-namespaces
NAMESPACE NAME READY STATUS RESTARTS AGE IP NODE NOMINATED NODE READINESS GATES
default nginx-alpine-deployment-7fb7fd49b4-fx6qf 1/1 Running 0 4h11m 172.30.1.103 vm-16-6-centos <none> <none>
default nginx-alpine-deployment-7fb7fd49b4-hjbz2 1/1 Running 0 4h11m 172.30.2.58 vm-16-4-centos <none> <none>
default nginx-alpine-deployment-7fb7fd49b4-q5p9w 1/1 Running 0 4h11m 172.30.1.102 vm-16-6-centos <none> <none>
default nginx-alpine-deployment-7fb7fd49b4-q7skj 1/1 Running 0 4h11m 172.30.1.104 vm-16-6-centos <none> <none>
default nginx-alpine-deployment-7fb7fd49b4-xx2vp 1/1 Running 0 4h11m 172.30.2.57 vm-16-4-centos <none> <none>
default nginx-deployment-5ff58d798d-gq4fl 1/1 Running 0 4h10m 172.30.2.59 vm-16-4-centos <none> <none>
default nginx-deployment-5ff58d798d-pxspv 1/1 Running 0 4h10m 172.30.2.60 vm-16-4-centos <none> <none>
default nginx-deployment-5ff58d798d-wfkzf 1/1 Running 0 4h10m 172.30.2.61 vm-16-4-centos <none> <none>
kube-system coredns-78fcd69978-mz7mv 1/1 Running 0 6h31m 172.30.0.2 vm-16-14-centos <none> <none>
kube-system coredns-78fcd69978-qspg4 1/1 Running 0 6h31m 172.30.0.3 vm-16-14-centos <none> <none>
kube-system etcd-vm-16-14-centos 1/1 Running 0 6h31m 10.206.16.14 vm-16-14-centos <none> <none>
kube-system kube-apiserver-vm-16-14-centos 1/1 Running 0 6h31m 10.206.16.14 vm-16-14-centos <none> <none>
kube-system kube-controller-manager-vm-16-14-centos 1/1 Running 0 6h31m 10.206.16.14 vm-16-14-centos <none> <none>
kube-system kube-flannel-ds-cljlp 1/1 Running 0 5h44m 10.206.16.6 vm-16-6-centos <none> <none>
kube-system kube-flannel-ds-kx55s 1/1 Running 0 6h25m 10.206.16.14 vm-16-14-centos <none> <none>
kube-system kube-flannel-ds-qs42h 1/1 Running 0 5h39m 10.206.16.4 vm-16-4-centos <none> <none>
kube-system kube-proxy-7nxvd 1/1 Running 1 (5h22m ago) 5h39m 10.206.16.4 vm-16-4-centos <none> <none>
kube-system kube-proxy-nvqvg 1/1 Running 0 6h31m 10.206.16.14 vm-16-14-centos <none> <none>
kube-system kube-proxy-t2vnp 1/1 Running 0 5h44m 10.206.16.6 vm-16-6-centos <none> <none>
kube-system kube-scheduler-vm-16-14-centos 1/1 Running 0 6h31m 10.206.16.14 vm-16-14-centos <none> <none>
kubernetes-dashboard dashboard-metrics-scraper-c45b7869d-9kw5c 1/1 Running 0 70s 172.30.1.108 vm-16-6-centos <none> <none>
kubernetes-dashboard kubernetes-dashboard-576cb95f94-msbtb 1/1 Running 0 70s 172.30.1.107 vm-16-6-centos <none> <none>
#大约2分钟,running
[root@VM-16-14-centos data]# kubectl describe pod kubernetes-dashboard-576cb95f94-msbtb --namespace=kubernetes-dashboard
[root@VM-16-14-centos data]# kubectl get svc -o wide --all-namespaces
NAMESPACE NAME TYPE CLUSTER-IP EXTERNAL-IP PORT(S) AGE SELECTOR
default kubernetes ClusterIP 10.254.0.1 <none> 443/TCP 6h38m <none>
default nginx ClusterIP 10.254.169.166 <none> 80/TCP 4h7m app=nginx
default nginx-nodeport NodePort 10.254.99.248 <none> 18080:30080/TCP 13m app=nginx
kube-system kube-dns ClusterIP 10.254.0.10 <none> 53/UDP,53/TCP,9153/TCP 6h38m k8s-app=kube-dns
kubernetes-dashboard dashboard-metrics-scraper ClusterIP 10.254.62.72 <none> 8000/TCP 7m45s k8s-app=dashboard-metrics-scraper
kubernetes-dashboard kubernetes-dashboard ClusterIP 10.254.158.48 <none> 443/TCP 7m46s k8s-app=kubernetes-dashboard
[root@VM-16-14-centos data]#
#修改为nodeport模式
[root@VM-16-14-centos data]# kubectl edit svc/kubernetes-dashboard --namespace=kubernetes-dashboard
[root@VM-16-14-centos data]# kubectl get svc -o wide --all-namespaces
NAMESPACE NAME TYPE CLUSTER-IP EXTERNAL-IP PORT(S) AGE SELECTOR
default kubernetes ClusterIP 10.254.0.1 <none> 443/TCP 6h47m <none>
default nginx ClusterIP 10.254.169.166 <none> 80/TCP 4h16m app=nginx
default nginx-nodeport NodePort 10.254.99.248 <none> 18080:30080/TCP 22m app=nginx
kube-system kube-dns ClusterIP 10.254.0.10 <none> 53/UDP,53/TCP,9153/TCP 6h47m k8s-app=kube-dns
kubernetes-dashboard dashboard-metrics-scraper ClusterIP 10.254.62.72 <none> 8000/TCP 16m k8s-app=dashboard-metrics-scraper
kubernetes-dashboard kubernetes-dashboard NodePort 10.254.158.48 <none> 443:31853/TCP 16m k8s-app=kubernetes-dashboard
1.1、创建dashboard账号
[root@VM-16-14-centos data]# kubectl create serviceaccount dashboard-admin -n kube-system
serviceaccount/dashboard-admin created
[root@VM-16-14-centos data]# kubectl create clusterrolebinding dashboard-cluster-admin --clusterrole=cluster-admin --serviceaccount=kube-system:dashboard-admin
clusterrolebinding.rbac.authorization.k8s.io/dashboard-cluster-admin created
[root@VM-16-14-centos data]# kubectl get serviceaccount -n kube-system | grep dashboard-admin
dashboard-admin 1 28s
[root@VM-16-14-centos data]# kubectl get secret -n kube-system | grep dashboard-admin
[root@VM-16-14-centos data]# kubectl describe secret dashboard-admin-token-zg9th -n kube-system
Name: dashboard-admin-token-zg9th
Namespace: kube-system
Labels: <none>
Annotations: kubernetes.io/service-account.name: dashboard-admin
kubernetes.io/service-account.uid: b184726d-d843-4955-8985-285ec533e731
Type: kubernetes.io/service-account-token
Data
====
namespace: 11 bytes
token: eyJhbGciOiJSUzI1NiIsImtpZCI6InJLQnlrdktBU0xmcTRmQndudVFTM2NzMGhnelFwRFhrM21kOEZjb2xEemsifQ.eyJpc3MiOiJrdWJlcm5ldGVzL3NlcnZpY2VhY2NvdW50Iiwia3ViZXJuZXRlcy5pby9zZXJ2aWNlYWNjb3VudC9uYW1lc3BhY2UiOiJrdWJlLXN5c3RlbSIsImt1YmVybmV0ZXMuaW8vc2VydmljZWFjY291bnQvc2VjcmV0Lm5hbWUiOiJkYXNoYm9hcmQtYWRtaW4tdG9rZW4temc5dGgiLCJrdWJlcm5ldGVzLmlvL3NlcnZpY2VhY2NvdW50L3NlcnZpY2UtYWNjb3VudC5uYW1lIjoiZGFzaGJvYXJkLWFkbWluIiwia3ViZXJuZXRlcy5pby9zZXJ2aWNlYWNjb3VudC9zZXJ2aWNlLWFjY291bnQudWlkIjoiYjE4NDcyNmQtZDg0My00OTU1LTg5ODUtMjg1ZWM1MzNlNzMxIiwic3ViIjoic3lzdGVtOnNlcnZpY2VhY2NvdW50Omt1YmUtc3lzdGVtOmRhc2hib2FyZC1hZG1pbiJ9.VD5isrvXS0lu-DAJLdUTKJm3bFWfanJlBUxjLZvckPZO0dhjoJXl0OS7Ed5iNKGNyH_5iPzZC9PT9_cVhJMPWeACsYcPRzsNvf4H5SwBJRDKZboAGa3Lik_sAGEiUIt1hy_jIeJArXyzGLi-OW7nKWyINaVv6XiFk64SEtE2R1Np1KXtLcMzIpvWM3Wz3sFy8v6VMqW3m3_av6PJ4S-QXbAvECIw4CuIRLeZHsYEyGXQ8fc2nv89eEc1iUZinDyMN69wuD-rliQyekAqWe0qj06YyWkhvYlPodvFrQeJe9_-jlBCma4z-E-6visGTQbfhN20r4iS-Rgxxe7H50UUjw
ca.crt: 1099 bytes
1.2、使用token登录
二、metrics-server[master]
[root@VM-16-14-centos data]# kubectl top node
error: Metrics API not available
[root@VM-16-14-centos data]# wget https://github.com/kubernetes-sigs/metrics-server/releases/download/v0.5.1/components.yaml
[root@VM-16-14-centos data]
[root@VM-16-14-centos data]# kubectl apply -f components.yaml
serviceaccount/metrics-server created
clusterrole.rbac.authorization.k8s.io/system:aggregated-metrics-reader created
clusterrole.rbac.authorization.k8s.io/system:metrics-server created
rolebinding.rbac.authorization.k8s.io/metrics-server-auth-reader created
clusterrolebinding.rbac.authorization.k8s.io/metrics-server:system:auth-delegator created
clusterrolebinding.rbac.authorization.k8s.io/system:metrics-server created
service/metrics-server created
deployment.apps/metrics-server created
apiservice.apiregistration.k8s.io/v1beta1.metrics.k8s.io created
[root@VM-16-14-centos data]
[root@VM-16-14-centos data]
[root@VM-16-14-centos data]
2.1、报错解决
[root@VM-16-14-centos data]# kubectl get pods -o wide --all-namespaces
NAMESPACE NAME READY STATUS RESTARTS AGE IP NODE NOMINATED NODE READINESS GATES
default nginx-alpine-deployment-7fb7fd49b4-fx6qf 1/1 Running 0 5h1m 172.30.1.103 vm-16-6-centos <none> <none>
default nginx-alpine-deployment-7fb7fd49b4-hjbz2 1/1 Running 0 5h1m 172.30.2.58 vm-16-4-centos <none> <none>
default nginx-alpine-deployment-7fb7fd49b4-q5p9w 1/1 Running 0 5h1m 172.30.1.102 vm-16-6-centos <none> <none>
default nginx-alpine-deployment-7fb7fd49b4-q7skj 1/1 Running 0 5h1m 172.30.1.104 vm-16-6-centos <none> <none>
default nginx-alpine-deployment-7fb7fd49b4-xx2vp 1/1 Running 0 5h1m 172.30.2.57 vm-16-4-centos <none> <none>
default nginx-deployment-5ff58d798d-gq4fl 1/1 Running 0 5h 172.30.2.59 vm-16-4-centos <none> <none>
default nginx-deployment-5ff58d798d-pxspv 1/1 Running 0 5h 172.30.2.60 vm-16-4-centos <none> <none>
default nginx-deployment-5ff58d798d-wfkzf 1/1 Running 0 5h 172.30.2.61 vm-16-4-centos <none> <none>
kube-system coredns-78fcd69978-mz7mv 1/1 Running 0 7h21m 172.30.0.2 vm-16-14-centos <none> <none>
kube-system coredns-78fcd69978-qspg4 1/1 Running 0 7h21m 172.30.0.3 vm-16-14-centos <none> <none>
kube-system etcd-vm-16-14-centos 1/1 Running 0 7h21m 10.206.16.14 vm-16-14-centos <none> <none>
kube-system kube-apiserver-vm-16-14-centos 1/1 Running 0 7h21m 10.206.16.14 vm-16-14-centos <none> <none>
kube-system kube-controller-manager-vm-16-14-centos 1/1 Running 0 7h21m 10.206.16.14 vm-16-14-centos <none> <none>
kube-system kube-flannel-ds-cljlp 1/1 Running 0 6h34m 10.206.16.6 vm-16-6-centos <none> <none>
kube-system kube-flannel-ds-kx55s 1/1 Running 0 7h15m 10.206.16.14 vm-16-14-centos <none> <none>
kube-system kube-flannel-ds-qs42h 1/1 Running 0 6h29m 10.206.16.4 vm-16-4-centos <none> <none>
kube-system kube-proxy-7nxvd 1/1 Running 1 (6h12m ago) 6h29m 10.206.16.4 vm-16-4-centos <none> <none>
kube-system kube-proxy-nvqvg 1/1 Running 0 7h21m 10.206.16.14 vm-16-14-centos <none> <none>
kube-system kube-proxy-t2vnp 1/1 Running 0 6h34m 10.206.16.6 vm-16-6-centos <none> <none>
kube-system kube-scheduler-vm-16-14-centos 1/1 Running 0 7h21m 10.206.16.14 vm-16-14-centos <none> <none>
kube-system metrics-server-68755d95b-bqwxg 0/1 Running 0 44s 172.30.1.110 vm-16-6-centos <none> <none>
kube-system metrics-server-9946cf8d9-6m6s4 0/1 ImagePullBackOff 0 3m35s 172.30.2.62 vm-16-4-centos <none> <none>
kubernetes-dashboard dashboard-metrics-scraper-c45b7869d-9kw5c 1/1 Running 0 51m 172.30.1.108 vm-16-6-centos <none> <none>
kubernetes-dashboard kubernetes-dashboard-576cb95f94-msbtb 1/1 Running 0 51m 172.30.1.107 vm-16-6-centos <none> <none>
[root@VM-16-14-centos data]# kubectl delete -f components.yaml
[root@VM-16-14-centos data]# vim components.yaml
image: k8s.gcr.io/metrics-server/metrics-server:v0.5.1
修改为
image: registry.cn-hangzhou.aliyuncs.com/google_containers/metrics-server:v0.5.1
[root@VM-16-14-centos data]# kubectl apply -f components.yaml
[root@VM-16-14-centos data]# kubectl get pods -o wide --all-namespaces
[root@VM-16-14-centos data]# kubectl top node
Error from server (ServiceUnavailable): the server is currently unable to handle the request (get nodes.metrics.k8s.io)
[root@VM-16-14-centos data]# kubectl -n kube-system logs metrics-server-68755d95b-6ztpj
I1024 16:37:32.867332 1 server.go:188] "Failed probe" probe="metric-storage-ready" err="not metrics to serve"
I1024 16:37:39.692731 1 server.go:188] "Failed probe" probe="metric-storage-ready" err="not metrics to serve"
I1024 16:37:42.867484 1 server.go:188] "Failed probe" probe="metric-storage-ready" err="not metrics to serve"
E1024 16:37:44.296616 1 scraper.go:139] "Failed to scrape node" err="Get \"https://10.206.16.14:10250/stats/summary?only_cpu_and_memory=true\": x509: cannot validate certificate for 10.206.16.14 because it doesn't contain any IP SANs" node="vm-16-14-centos"
E1024 16:37:44.297356 1 scraper.go:139] "Failed to scrape node" err="Get \"https://10.206.16.6:10250/stats/summary?only_cpu_and_memory=true\": x509: cannot validate certificate for 10.206.16.6 because it doesn't contain any IP SANs" node="vm-16-6-centos"
E1024 16:37:44.310631 1 scraper.go:139] "Failed to scrape node" err="Get \"https://10.206.16.4:10250/stats/summary?only_cpu_and_memory=true\": x509: cannot validate certificate for 10.206.16.4 because it doesn't contain any IP SANs" node="vm-16-4-centos"
I1024 16:37:52.868113 1 server.go:188] "Failed probe" probe="metric-storage-ready" err="not metrics to serve"
[root@VM-16-14-centos data]# vim components.yaml
增加
- --kubelet-insecure-tls
[root@VM-16-14-centos data]# kubectl apply -f components.yaml
[root@VM-16-14-centos data]# kubectl get pods -o wide --all-namespaces
[root@VM-16-14-centos data]# kubectl top pods
NAME CPU(cores) MEMORY(bytes)
nginx-alpine-deployment-7fb7fd49b4-fx6qf 0m 2Mi
nginx-alpine-deployment-7fb7fd49b4-hjbz2 0m 2Mi
nginx-alpine-deployment-7fb7fd49b4-q5p9w 0m 2Mi
nginx-alpine-deployment-7fb7fd49b4-q7skj 0m 2Mi
nginx-alpine-deployment-7fb7fd49b4-xx2vp 0m 2Mi
nginx-deployment-5ff58d798d-gq4fl 0m 2Mi
nginx-deployment-5ff58d798d-pxspv 0m 2Mi
nginx-deployment-5ff58d798d-wfkzf 0m 2Mi
[root@VM-16-14-centos data]# kubectl top nodes
NAME CPU(cores) CPU% MEMORY(bytes) MEMORY%
vm-16-14-centos 135m 6% 2016Mi 55%
vm-16-4-centos 39m 1% 1153Mi 31%
vm-16-6-centos 38m 1% 1306Mi 35%
三、kube-shell
[root@VM-16-14-centos data]
[root@VM-16-14-centos ~]
[root@VM-16-14-centos ~]# kube-shell
kube-shell> exit
四、ipvsadm
[root@VM-16-14-centos ~]# ipvsadm -L
IP Virtual Server version 1.2.1 (size=4096)
Prot LocalAddress:Port Scheduler Flags
-> RemoteAddress:Port Forward Weight ActiveConn InActConn
五、helm
[root@VM-16-14-centos data]# rz
rz waiting to receive.**B0100000023be50
[root@VM-16-14-centos data]# # Received helm-v3.7.1-linux-amd64.tar.gz
[root@VM-16-14-centos data]# tar zxvf helm-v3.7.1-linux-amd64.tar.gz
linux-amd64/
linux-amd64/helm
linux-amd64/LICENSE
linux-amd64/README.md
[root@VM-16-14-centos data]# cp linux-amd64/helm /usr/local/bin/helm
[root@VM-16-14-centos data]# helm version
version.BuildInfo{Version:"v3.7.1", GitCommit:"1d11fcb5d3f3bf00dbe6fe31b8412839a96b3dc4", GitTreeState:"clean", GoVersion:"go1.16.9"}
[root@VM-16-14-centos data]# helm repo add elastic https://helm.elastic.co
"elastic" has been added to your repositories
[root@VM-16-14-centos data]# helm repo add gitlab https://charts.gitlab.io
"gitlab" has been added to your repositories
[root@VM-16-14-centos data]# helm repo add harbor https://helm.goharbor.io
"harbor" has been added to your repositories
[root@VM-16-14-centos data]# helm repo add bitnami https://charts.bitnami.com/bitnami
"bitnami" has been added to your repositories
[root@VM-16-14-centos data]# helm repo add incubator https://kubernetes-charts-incubator.storage.googleapis.com
Error: repo "https://kubernetes-charts-incubator.storage.googleapis.com" is no longer available; try "https://charts.helm.sh/incubator" instead
[root@VM-16-14-centos data]# helm repo add stable https://kubernetes-charts.storage.googleapis.com
Error: repo "https://kubernetes-charts.storage.googleapis.com" is no longer available; try "https://charts.helm.sh/stable" instead
[root@VM-16-14-centos data]# helm repo add stable http://mirror.azure.cn/kubernetes/charts
"stable" has been added to your repositories
[root@VM-16-14-centos data]# helm repo add aliyun https://kubernetes.oss-cn-hangzhou.aliyuncs.com/charts
"aliyun" has been added to your repositories
[root@VM-16-14-centos data]# helm repo update
Hang tight while we grab the latest from your chart repositories...
...Successfully got an update from the "aliyun" chart repository
...Successfully got an update from the "stable" chart repository
...Successfully got an update from the "elastic" chart repository
...Successfully got an update from the "gitlab" chart repository
...Successfully got an update from the "harbor" chart repository
...Successfully got an update from the "bitnami" chart repository
Update Complete. ⎈Happy Helming!⎈
[root@VM-16-14-centos data]# helm repo list
NAME URL
elastic https://helm.elastic.co
gitlab https://charts.gitlab.io
harbor https://helm.goharbor.io
bitnami https://charts.bitnami.com/bitnami
stable http://mirror.azure.cn/kubernetes/charts
aliyun https://kubernetes.oss-cn-hangzhou.aliyuncs.com/charts
[root@VM-16-14-centos data]# helm list
NAME NAMESPACE REVISION UPDATED STATUS CHART APP VERSION
[root@VM-16-14-centos data]# helm search repo nginx
NAME CHART VERSION APP VERSION DESCRIPTION
aliyun/nginx-ingress 0.9.5 0.10.2 An nginx Ingress controller that uses ConfigMap...
aliyun/nginx-lego 0.3.1 Chart for nginx-ingress-controller and kube-lego
bitnami/nginx 9.5.10 1.21.3 Chart for the nginx server
bitnami/nginx-ingress-controller 8.0.11 1.0.4 Chart for the nginx Ingress controller
stable/nginx-ingress 1.41.3 v0.34.1 DEPRECATED! An nginx Ingress controller that us...
stable/nginx-ldapauth-proxy 0.1.6 1.13.5 DEPRECATED - nginx proxy with ldapauth
stable/nginx-lego 0.3.1 Chart for nginx-ingress-controller and kube-lego
bitnami/kong 4.1.6 2.6.0 Kong is a scalable, open source API layer (aka ...
aliyun/gcloud-endpoints 0.1.0 Develop, deploy, protect and monitor your APIs ...
stable/gcloud-endpoints 0.1.2 1 DEPRECATED Develop, deploy, protect and monitor...
[root@VM-16-14-centos ~]
六、kubeview
[root@VM-16-14-centos data]# rz
rz waiting to receive.**B0100000023be50
[root@VM-16-14-centos data]# # Received /Users/shencaifeiyangdekk/Downloads/kubeview-0.1.20.tgz
[root@VM-16-14-centos data]# tar zxvf kubeview-0.1.20.tgz
[root@VM-16-14-centos kubeview]# vim values.yaml
七、weave-scope
[root@VM-16-14-centos data]# kubectl apply -f "https://cloud.weave.works/k8s/scope.yaml?k8s-version=$(kubectl version | base64 | tr -d '\n')"
namespace/weave created
serviceaccount/weave-scope created
clusterrole.rbac.authorization.k8s.io/weave-scope created
clusterrolebinding.rbac.authorization.k8s.io/weave-scope created
deployment.apps/weave-scope-app created
service/weave-scope-app created
deployment.apps/weave-scope-cluster-agent created
daemonset.apps/weave-scope-agent created
[root@VM-16-14-centos data]# kubectl get pod -n weave -o wide
NAME READY STATUS RESTARTS AGE IP NODE NOMINATED NODE READINESS GATES
weave-scope-agent-7ftj2 0/1 ContainerCreating 0 23s 10.206.16.6 vm-16-6-centos <none> <none>
weave-scope-agent-fwcr9 0/1 ContainerCreating 0 23s 10.206.16.14 vm-16-14-centos <none> <none>
weave-scope-agent-jpsxk 0/1 ContainerCreating 0 23s 10.206.16.4 vm-16-4-centos <none> <none>
weave-scope-app-5f9f566559-99vqq 0/1 ContainerCreating 0 23s <none> vm-16-6-centos <none> <none>
weave-scope-cluster-agent-6b6f974dc6-jc9bz 0/1 ContainerCreating 0 23s <none> vm-16-4-centos <none> <none>
[root@VM-16-14-centos data]# kubectl get service -n weave -o wide
NAME TYPE CLUSTER-IP EXTERNAL-IP PORT(S) AGE SELECTOR
weave-scope-app ClusterIP 10.254.8.221 <none> 80/TCP 109s app=weave-scope,name=weave-scope-app,weave-cloud-component=scope,weave-scope-component=app
[root@VM-16-14-centos data]# kubectl get svc
NAMESPACE NAME TYPE CLUSTER-IP EXTERNAL-IP PORT(S) AGE
default kubernetes ClusterIP 10.254.0.1 <none> 443/TCP 3d7h
default nginx ClusterIP 10.254.169.166 <none> 80/TCP 3d4h
default nginx-nodeport NodePort 10.254.99.248 <none> 18080:30080/TCP 3d1h
ingress-nginx ingress-nginx-controller NodePort 10.254.249.225 <none> 80:30840/TCP,443:31002/TCP 174m
ingress-nginx ingress-nginx-controller-admission ClusterIP 10.254.142.132 <none> 443/TCP 174m
kube-system kube-dns ClusterIP 10.254.0.10 <none> 53/UDP,53/TCP,9153/TCP 3d7h
kube-system metrics-server ClusterIP 10.254.33.253 <none> 443/TCP 2d23h
kubernetes-dashboard dashboard-metrics-scraper ClusterIP 10.254.62.72 <none> 8000/TCP 3d
kubernetes-dashboard kubernetes-dashboard NodePort 10.254.158.48 <none> 443:31853/TCP 3d
weave weave-scope-app ClusterIP 10.254.8.221 <none> 80/TCP 2m26s