CoreDNS YAML deployment

(546 reads, 6-minute read)
Original link: yq.aliyun.com
# cat coredns.yaml --- apiVersion: v1 kind: ServiceAccount imagePullSecrets: - name: default metadata:   name: coredns   namespace: kube-system --- apiVersion: rbac.authorization.k8s.io/v1beta1 kind: ClusterRole metadata:   labels:     kubernetes.io/bootstrapping: rbac-defaults   name: system:coredns rules: - apiGroups:   - ""   resources:   - endpoints   - services   - pods   - namespaces   verbs:   - list   - watch --- apiVersion: rbac.authorization.k8s.io/v1beta1 kind: ClusterRoleBinding metadata:   annotations:     rbac.authorization.kubernetes.io/autoupdate: "true"   labels:     kubernetes.io/bootstrapping: rbac-defaults   name: system:coredns roleRef:   apiGroup: rbac.authorization.k8s.io   kind: ClusterRole   name: system:coredns subjects: - kind: ServiceAccount   name: coredns   namespace: kube-system --- apiVersion: v1 kind: ConfigMap metadata:   name: coredns   namespace: kube-system data:   Corefile: |     .:53 {         errors         health         kubernetes cluster.local REVERSE_CIDRS {           pods insecure           upstream           fallthrough in-addr.arpa ip6.arpa         }         prometheus :9153         proxy . 
/etc/resolv.conf         cache 30     } --- apiVersion: extensions/v1beta1 kind: Deployment metadata:   name: coredns   namespace: kube-system   labels:     k8s-app: kube-dns     kubernetes.io/name: "CoreDNS" spec:   replicas: 2   strategy:     type: RollingUpdate     rollingUpdate:       maxUnavailable: 1   selector:     matchLabels:       k8s-app: kube-dns   template:     metadata:       labels:         k8s-app: kube-dns     spec:       serviceAccountName: coredns       tolerations:         - key: "CriticalAddonsOnly"           operator: "Exists"       containers:       - name: coredns         image: hub.cloud.pub/coredns/coredns:1.1.3         imagePullPolicy: IfNotPresent         args: [ "-conf", "/etc/coredns/Corefile" ]         volumeMounts:         - name: config-volume           mountPath: /etc/coredns         ports:         - containerPort: 53           name: dns           protocol: UDP         - containerPort: 53           name: dns-tcp           protocol: TCP         - containerPort: 9153           name: metrics           protocol: TCP         livenessProbe:           httpGet:             path: /health             port: 8080             scheme: HTTP           initialDelaySeconds: 60           timeoutSeconds: 5           successThreshold: 1           failureThreshold: 5       dnsPolicy: Default       nodeSelector:         caas_cluster: kube-system       volumes:         - name: config-volume           configMap:             name: coredns             items:             - key: Corefile               path: Corefile --- apiVersion: v1 kind: Service metadata:   name: coredns   namespace: kube-system   annotations:     prometheus.io/scrape: "true"   labels:     k8s-app: coredns     kubernetes.io/cluster-service: "true"     kubernetes.io/name: "CoreDNS" spec:   selector:     k8s-app: kube-dns   clusterIP: 172.254.0.2   ports:   - name: dns     port: 53     protocol: UDP   - name: dns-tcp     port: 53     protocol: TCP
----
$ cat dns.yaml
--- # ConfigMap apiVersion: v1 kind: ConfigMap metadata:   name: kube-dns   namespace: kube-system   labels:     addonmanager.kubernetes.io/mode: EnsureExists
---
# ServiceAccount apiVersion: v1 kind: ServiceAccount imagePullSecrets: - name: default metadata:   name: kube-dns   namespace: kube-system   labels:     kubernetes.io/cluster-service: "true"     addonmanager.kubernetes.io/mode: Reconcile
---
# Deployment apiVersion: extensions/v1beta1 kind: Deployment metadata:   name: kube-dns   namespace: kube-system   labels:     k8s-app: kube-dns     kubernetes.io/cluster-service: "true"     addonmanager.kubernetes.io/mode: Reconcile spec:   replicas: 2   strategy:     rollingUpdate:       maxSurge: 10%       maxUnavailable: 0   selector:     matchLabels:       k8s-app: kube-dns   template:     metadata:       labels:         k8s-app: kube-dns       annotations:         scheduler.alpha.kubernetes.io/critical-pod: ''     spec:       tolerations:       - key: "CriticalAddonsOnly"         operator: "Exists"       volumes:       - name: kube-dns-config         configMap:           name: kube-dns           optional: true       containers:       - name: kubedns         image: hub.cloud.pub/k8s/k8s-dns-kube-dns-amd64:1.14.2         resources:           limits:             memory: 170Mi           requests:             cpu: 100m             memory: 70Mi         livenessProbe:           httpGet:             path: /healthcheck/kubedns             port: 10054             scheme: HTTP           initialDelaySeconds: 60           timeoutSeconds: 5           successThreshold: 1           failureThreshold: 5         readinessProbe:           httpGet:             path: /readiness             port: 8081             scheme: HTTP           # we poll on pod startup for the Kubernetes master service and           # only setup the /readiness HTTP server once that's available.           initialDelaySeconds: 3           timeoutSeconds: 5         args:         - --domain=cluster.local.         
- --dns-port=10053         - --config-dir=/kube-dns-config         - --v=2         #__PILLAR__FEDERATIONS__DOMAIN__MAP__         env:         - name: PROMETHEUS_PORT           value: "10055"         ports:         - containerPort: 10053           name: dns-local           protocol: UDP         - containerPort: 10053           name: dns-tcp-local           protocol: TCP         - containerPort: 10055           name: metrics           protocol: TCP         volumeMounts:         - name: kube-dns-config           mountPath: /kube-dns-config       - name: dnsmasq         image: hub.cloud.pub/k8s/k8s-dns-dnsmasq-nanny-amd64:1.14.2         livenessProbe:           httpGet:             path: /healthcheck/dnsmasq             port: 10054             scheme: HTTP           initialDelaySeconds: 60           timeoutSeconds: 5           successThreshold: 1           failureThreshold: 5         args:         - -v=2         - -logtostderr         - -configDir=/etc/k8s/dns/dnsmasq-nanny         - -restartDnsmasq=true         - --         - -k         - --cache-size=1000         - --log-facility=-         - --server=/cluster.local./127.0.0.1#10053         - --server=/in-addr.arpa/127.0.0.1#10053         - --server=/ip6.arpa/127.0.0.1#10053         ports:         - containerPort: 53           name: dns           protocol: UDP         - containerPort: 53           name: dns-tcp           protocol: TCP         # see: https://github.com/kubernetes/kubernetes/issues/29055 for details         resources:           requests:             cpu: 150m             memory: 20Mi         volumeMounts:         - name: kube-dns-config           mountPath: /etc/k8s/dns/dnsmasq-nanny       - name: sidecar         image: hub.cloud.pub/k8s/k8s-dns-sidecar-amd64:1.14.2         livenessProbe:           httpGet:             path: /metrics             port: 10054             scheme: HTTP           initialDelaySeconds: 60           timeoutSeconds: 5           successThreshold: 1           failureThreshold: 5   
      args:         - --v=2         - --logtostderr         - --probe=kubedns,127.0.0.1:10053,kubernetes.default.svc.cluster.local.,5,A         - --probe=dnsmasq,127.0.0.1:53,kubernetes.default.svc.cluster.local.,5,A         ports:         - containerPort: 10054           name: metrics           protocol: TCP         resources:           requests:             memory: 20Mi             cpu: 10m       dnsPolicy: Default       serviceAccountName: kube-dns       nodeSelector:          caas_cluster: kube-system
---
# KubeService apiVersion: v1 kind: Service metadata:   name: kube-dns   namespace: kube-system   labels:     k8s-app: kube-dns     kubernetes.io/cluster-service: "true"     addonmanager.kubernetes.io/mode: Reconcile     kubernetes.io/name: "KubeDNS" spec:   selector:     k8s-app: kube-dns   clusterIP: 172.254.0.2   ports:   - name: dns     port: 53     protocol: UDP   - name: dns-tcp     port: 53     protocol: TCP ----