12.部署master
12.1 安装master组件
[root@ansible-server ansible]
[root@ansible-server ansible]
[root@ansible-server kubernetes-master]
files tasks templates vars
[root@ansible-server kubernetes-master]
# Certificate files generated on the etcd nodes that the masters need in
# order to talk to the etcd cluster (CA pair + client/server pair).
ETCD_CERT:
- etcd-ca-key.pem
- etcd-ca.pem
- etcd-key.pem
- etcd.pem
# Master node IPs — used as delegate_to targets for synchronize pulls.
MASTER01: 172.31.3.101
MASTER02: 172.31.3.102
MASTER03: 172.31.3.103
[root@ansible-server kubernetes-master]
# copy_etcd_cert.yml
# Distribute the etcd certificates from k8s-etcd01 to every master node and
# link them into /etc/kubernetes/pki/etcd where kube-apiserver expects them.
- name: create /etc/etcd/ssl directory
  file:
    path: /etc/etcd/ssl
    state: directory
  when:
    - inventory_hostname in groups.master

# The next three tasks run on k8s-etcd01 but delegate to each master; with
# mode=pull the delegated (master) host pulls every file in ETCD_CERT from
# etcd01. Task names previously mentioned only etcd-ca-key.pem even though
# the loop copies all four files.
- name: transfer etcd cert files from etcd01 to master01
  synchronize:
    src: "/etc/etcd/ssl/{{ item }}"
    dest: /etc/etcd/ssl/
    mode: pull
  loop: "{{ ETCD_CERT }}"
  delegate_to: "{{ MASTER01 }}"
  when:
    - ansible_hostname=="k8s-etcd01"

- name: transfer etcd cert files from etcd01 to master02
  synchronize:
    src: "/etc/etcd/ssl/{{ item }}"
    dest: /etc/etcd/ssl/
    mode: pull
  loop: "{{ ETCD_CERT }}"
  delegate_to: "{{ MASTER02 }}"
  when:
    - ansible_hostname=="k8s-etcd01"

- name: transfer etcd cert files from etcd01 to master03
  synchronize:
    src: "/etc/etcd/ssl/{{ item }}"
    dest: /etc/etcd/ssl/
    mode: pull
  loop: "{{ ETCD_CERT }}"
  delegate_to: "{{ MASTER03 }}"
  when:
    - ansible_hostname=="k8s-etcd01"

- name: create /etc/kubernetes/pki/etcd directory
  file:
    path: /etc/kubernetes/pki/etcd
    state: directory
  when:
    - inventory_hostname in groups.master

# Symlink each cert so both /etc/etcd/ssl and /etc/kubernetes/pki/etcd paths
# resolve to the same files.
- name: link etcd_ssl to kubernetes pki
  file:
    src: "/etc/etcd/ssl/{{ item }}"
    dest: "/etc/kubernetes/pki/etcd/{{ item }}"
    state: link
  loop: "{{ ETCD_CERT }}"
  when:
    - inventory_hostname in groups.master
[root@ansible-server kubernetes-master]
[root@ansible-server kubernetes-master]
[root@ansible-server kubernetes-master]
[root@ansible-server kubernetes-master]
kube-apiserver kube-controller-manager kubectl kubelet kube-proxy kube-scheduler
[root@ansible-server kubernetes-master]
[root@ansible-server kubernetes-master]
# copy_kubernetes_file.yml
# Install the kubernetes server binaries on every master and prepare the
# CNI plugin directory.
- name: copy kubernetes files to master
  copy:
    src: "bin/{{ item }}"
    dest: /usr/local/bin/
    # quoted so YAML cannot reinterpret the leading-zero octal literal
    mode: "0755"
  loop:
    - kube-apiserver
    - kube-controller-manager
    - kubectl
    - kubelet
    - kube-proxy
    - kube-scheduler
  when:
    - inventory_hostname in groups.master

- name: create /opt/cni/bin directory
  file:
    path: /opt/cni/bin
    state: directory
  when:
    - inventory_hostname in groups.master
[root@ansible-server kubernetes-master]
[root@ansible-server kubernetes-master]
[root@ansible-server kubernetes-master]
bin cfssl cfssljson
[root@ansible-server kubernetes-master]
...
SERVICE_IP: 10.96.0.1  # first IP of the service subnet, added to apiserver cert SANs
VIP: 172.31.3.188      # load-balanced apiserver virtual IP used in all kubeconfigs
K8S_CLUSTER: kubernetes
# All certificate artifacts produced on master01 that must be replicated to
# the other masters.
KUBERNETES_CERT:
- ca.csr
- ca-key.pem
- ca.pem
- apiserver.csr
- apiserver-key.pem
- apiserver.pem
- front-proxy-ca.csr
- front-proxy-ca-key.pem
- front-proxy-ca.pem
- front-proxy-client.csr
- front-proxy-client-key.pem
- front-proxy-client.pem
- controller-manager.csr
- controller-manager-key.pem
- controller-manager.pem
- scheduler.csr
- scheduler-key.pem
- scheduler.pem
- admin.csr
- admin-key.pem
- admin.pem
- sa.key
- sa.pub
# kubeconfig files generated on master01 and replicated to the other masters.
KUBECONFIG:
- controller-manager.kubeconfig
- scheduler.kubeconfig
- admin.kubeconfig
[root@ansible-server kubernetes-master]
[root@ansible-server kubernetes-master]
{
  "CN": "{{ K8S_CLUSTER }}",
  "key": {
    "algo": "rsa",
    "size": 2048
  },
  "names": [
    {
      "C": "CN",
      "ST": "Beijing",
      "L": "Beijing",
      "O": "Kubernetes",
      "OU": "Kubernetes-manual"
    }
  ],
  "ca": {
    "expiry": "876000h"
  }
}
[root@ansible-server kubernetes-master]
{
  "signing": {
    "default": {
      "expiry": "876000h"
    },
    "profiles": {
      "kubernetes": {
        "usages": [
          "signing",
          "key encipherment",
          "server auth",
          "client auth"
        ],
        "expiry": "876000h"
      }
    }
  }
}
[root@ansible-server kubernetes-master]
{
  "CN": "kube-apiserver",
  "key": {
    "algo": "rsa",
    "size": 2048
  },
  "names": [
    {
      "C": "CN",
      "ST": "Beijing",
      "L": "Beijing",
      "O": "Kubernetes",
      "OU": "Kubernetes-manual"
    }
  ]
}
[root@ansible-server kubernetes-master]
{
  "CN": "{{ K8S_CLUSTER }}",
  "key": {
    "algo": "rsa",
    "size": 2048
  }
}
[root@ansible-server kubernetes-master]
{
  "CN": "front-proxy-client",
  "key": {
    "algo": "rsa",
    "size": 2048
  }
}
[root@ansible-server kubernetes-master]
{
  "CN": "system:kube-controller-manager",
  "key": {
    "algo": "rsa",
    "size": 2048
  },
  "names": [
    {
      "C": "CN",
      "ST": "Beijing",
      "L": "Beijing",
      "O": "system:kube-controller-manager",
      "OU": "Kubernetes-manual"
    }
  ]
}
[root@ansible-server kubernetes-master]
{
  "CN": "system:kube-scheduler",
  "key": {
    "algo": "rsa",
    "size": 2048
  },
  "names": [
    {
      "C": "CN",
      "ST": "Beijing",
      "L": "Beijing",
      "O": "system:kube-scheduler",
      "OU": "Kubernetes-manual"
    }
  ]
}
[root@ansible-server kubernetes-master]
{
  "CN": "admin",
  "key": {
    "algo": "rsa",
    "size": 2048
  },
  "names": [
    {
      "C": "CN",
      "ST": "Beijing",
      "L": "Beijing",
      "O": "system:masters",
      "OU": "Kubernetes-manual"
    }
  ]
}
[root@ansible-server kubernetes-master]
# create_kubernetes_cert.yml
# Generate the kubernetes CA, component certificates and kubeconfigs on
# k8s-master01, then replicate them to master02/master03.
- name: create /etc/kubernetes/pki directory
  file:
    path: /etc/kubernetes/pki
    state: directory
  when:
    - inventory_hostname in groups.master

- name: copy cfssl and cfssljson tools
  copy:
    src: "{{ item }}"
    dest: /usr/local/bin
    # quoted so YAML cannot reinterpret the leading-zero octal literal
    mode: "0755"
  loop:
    - cfssl
    - cfssljson
  when:
    - ansible_hostname=="k8s-master01"

- name: create pki directory
  file:
    path: /root/pki
    state: directory
  when:
    - ansible_hostname=="k8s-master01"

- name: copy pki files
  template:
    src: "pki/{{ item }}.j2"
    dest: "/root/pki/{{ item }}"
  loop:
    - ca-csr.json
    - ca-config.json
    - apiserver-csr.json
    - front-proxy-ca-csr.json
    - front-proxy-client-csr.json
    - manager-csr.json
    - scheduler-csr.json
    - admin-csr.json
  when:
    - ansible_hostname=="k8s-master01"

# `creates:` makes each cert-generation task idempotent — the shell command
# is skipped once the target .pem already exists.
- name: create ca cert
  shell:
    chdir: /root/pki
    cmd: cfssl gencert -initca ca-csr.json | cfssljson -bare /etc/kubernetes/pki/ca
    creates: /etc/kubernetes/pki/ca.pem
  when:
    - ansible_hostname=="k8s-master01"

# SANs cover the service IP, the VIP, localhost, the in-cluster DNS names
# and every master's default IPv4 address.
- name: create apiserver cert
  shell:
    chdir: /root/pki
    cmd: cfssl gencert -ca=/etc/kubernetes/pki/ca.pem -ca-key=/etc/kubernetes/pki/ca-key.pem -config=ca-config.json -hostname={{ SERVICE_IP }},{{ VIP }},127.0.0.1,{{ K8S_CLUSTER }},{{ K8S_CLUSTER }}.default,{{ K8S_CLUSTER }}.default.svc,{{ K8S_CLUSTER }}.default.svc.cluster,{{ K8S_CLUSTER }}.default.svc.cluster.local,{% for i in groups.master %}{{ hostvars[i].ansible_default_ipv4.address }}{% if not loop.last %},{% endif %}{% endfor %} -profile={{ K8S_CLUSTER }} apiserver-csr.json | cfssljson -bare /etc/kubernetes/pki/apiserver
    creates: /etc/kubernetes/pki/apiserver.pem
  when:
    - ansible_hostname=="k8s-master01"

- name: create front-proxy-ca cert
  shell:
    chdir: /root/pki
    cmd: cfssl gencert -initca front-proxy-ca-csr.json | cfssljson -bare /etc/kubernetes/pki/front-proxy-ca
    creates: /etc/kubernetes/pki/front-proxy-ca.pem
  when:
    - ansible_hostname=="k8s-master01"

- name: create front-proxy-client cert
  shell:
    chdir: /root/pki
    cmd: cfssl gencert -ca=/etc/kubernetes/pki/front-proxy-ca.pem -ca-key=/etc/kubernetes/pki/front-proxy-ca-key.pem -config=ca-config.json -profile={{ K8S_CLUSTER }} front-proxy-client-csr.json | cfssljson -bare /etc/kubernetes/pki/front-proxy-client
    creates: /etc/kubernetes/pki/front-proxy-client.pem
  when:
    - ansible_hostname=="k8s-master01"

- name: create controller-manager cert
  shell:
    chdir: /root/pki
    cmd: cfssl gencert -ca=/etc/kubernetes/pki/ca.pem -ca-key=/etc/kubernetes/pki/ca-key.pem -config=ca-config.json -profile={{ K8S_CLUSTER }} manager-csr.json | cfssljson -bare /etc/kubernetes/pki/controller-manager
    creates: /etc/kubernetes/pki/controller-manager.pem
  when:
    - ansible_hostname=="k8s-master01"

- name: set-cluster controller-manager.kubeconfig
  shell:
    cmd: kubectl config set-cluster {{ K8S_CLUSTER }} --certificate-authority=/etc/kubernetes/pki/ca.pem --embed-certs=true --server=https://{{ VIP }}:6443 --kubeconfig=/etc/kubernetes/controller-manager.kubeconfig
  when:
    - ansible_hostname=="k8s-master01"

- name: set-credentials controller-manager.kubeconfig
  shell:
    cmd: kubectl config set-credentials system:kube-controller-manager --client-certificate=/etc/kubernetes/pki/controller-manager.pem --client-key=/etc/kubernetes/pki/controller-manager-key.pem --embed-certs=true --kubeconfig=/etc/kubernetes/controller-manager.kubeconfig
  when:
    - ansible_hostname=="k8s-master01"

- name: set-context controller-manager.kubeconfig
  shell:
    cmd: kubectl config set-context system:kube-controller-manager@{{ K8S_CLUSTER }} --cluster={{ K8S_CLUSTER }} --user=system:kube-controller-manager --kubeconfig=/etc/kubernetes/controller-manager.kubeconfig
  when:
    - ansible_hostname=="k8s-master01"

- name: use-context controller-manager.kubeconfig
  shell:
    cmd: kubectl config use-context system:kube-controller-manager@{{ K8S_CLUSTER }} --kubeconfig=/etc/kubernetes/controller-manager.kubeconfig
  when:
    - ansible_hostname=="k8s-master01"

- name: create scheduler cert
  shell:
    chdir: /root/pki
    cmd: cfssl gencert -ca=/etc/kubernetes/pki/ca.pem -ca-key=/etc/kubernetes/pki/ca-key.pem -config=ca-config.json -profile={{ K8S_CLUSTER }} scheduler-csr.json | cfssljson -bare /etc/kubernetes/pki/scheduler
    creates: /etc/kubernetes/pki/scheduler.pem
  when:
    - ansible_hostname=="k8s-master01"

- name: set-cluster scheduler.kubeconfig
  shell:
    cmd: kubectl config set-cluster {{ K8S_CLUSTER }} --certificate-authority=/etc/kubernetes/pki/ca.pem --embed-certs=true --server=https://{{ VIP }}:6443 --kubeconfig=/etc/kubernetes/scheduler.kubeconfig
  when:
    - ansible_hostname=="k8s-master01"

- name: set-credentials scheduler.kubeconfig
  shell:
    cmd: kubectl config set-credentials system:kube-scheduler --client-certificate=/etc/kubernetes/pki/scheduler.pem --client-key=/etc/kubernetes/pki/scheduler-key.pem --embed-certs=true --kubeconfig=/etc/kubernetes/scheduler.kubeconfig
  when:
    - ansible_hostname=="k8s-master01"

# --cluster now uses the K8S_CLUSTER variable like every sibling task
# (was hard-coded to the literal "kubernetes").
- name: set-context scheduler.kubeconfig
  shell:
    cmd: kubectl config set-context system:kube-scheduler@{{ K8S_CLUSTER }} --cluster={{ K8S_CLUSTER }} --user=system:kube-scheduler --kubeconfig=/etc/kubernetes/scheduler.kubeconfig
  when:
    - ansible_hostname=="k8s-master01"

- name: use-context scheduler.kubeconfig
  shell:
    cmd: kubectl config use-context system:kube-scheduler@{{ K8S_CLUSTER }} --kubeconfig=/etc/kubernetes/scheduler.kubeconfig
  when:
    - ansible_hostname=="k8s-master01"

- name: create admin cert
  shell:
    chdir: /root/pki
    cmd: cfssl gencert -ca=/etc/kubernetes/pki/ca.pem -ca-key=/etc/kubernetes/pki/ca-key.pem -config=ca-config.json -profile={{ K8S_CLUSTER }} admin-csr.json | cfssljson -bare /etc/kubernetes/pki/admin
    creates: /etc/kubernetes/pki/admin.pem
  when:
    - ansible_hostname=="k8s-master01"

- name: set-cluster admin.kubeconfig
  shell:
    cmd: kubectl config set-cluster {{ K8S_CLUSTER }} --certificate-authority=/etc/kubernetes/pki/ca.pem --embed-certs=true --server=https://{{ VIP }}:6443 --kubeconfig=/etc/kubernetes/admin.kubeconfig
  when:
    - ansible_hostname=="k8s-master01"

- name: set-credentials admin.kubeconfig
  shell:
    cmd: kubectl config set-credentials kubernetes-admin --client-certificate=/etc/kubernetes/pki/admin.pem --client-key=/etc/kubernetes/pki/admin-key.pem --embed-certs=true --kubeconfig=/etc/kubernetes/admin.kubeconfig
  when:
    - ansible_hostname=="k8s-master01"

- name: set-context admin.kubeconfig
  shell:
    cmd: kubectl config set-context kubernetes-admin@{{ K8S_CLUSTER }} --cluster={{ K8S_CLUSTER }} --user=kubernetes-admin --kubeconfig=/etc/kubernetes/admin.kubeconfig
  when:
    - ansible_hostname=="k8s-master01"

- name: use-context admin.kubeconfig
  shell:
    cmd: kubectl config use-context kubernetes-admin@{{ K8S_CLUSTER }} --kubeconfig=/etc/kubernetes/admin.kubeconfig
  when:
    - ansible_hostname=="k8s-master01"

# Service-account signing keypair used by apiserver and controller-manager.
- name: create sa.key
  shell:
    cmd: openssl genrsa -out /etc/kubernetes/pki/sa.key 2048
    creates: /etc/kubernetes/pki/sa.key
  when:
    - ansible_hostname=="k8s-master01"

- name: create sa.pub
  shell:
    cmd: openssl rsa -in /etc/kubernetes/pki/sa.key -pubout -out /etc/kubernetes/pki/sa.pub
    creates: /etc/kubernetes/pki/sa.pub
  when:
    - ansible_hostname=="k8s-master01"

# Replicate all certs and kubeconfigs: the task runs on master01 and
# delegates to master02/03, which pull the files (mode=pull).
- name: transfer cert files from master01 to master02
  synchronize:
    src: "/etc/kubernetes/pki/{{ item }}"
    dest: /etc/kubernetes/pki
    mode: pull
  loop: "{{ KUBERNETES_CERT }}"
  delegate_to: "{{ MASTER02 }}"
  when:
    - ansible_hostname=="k8s-master01"

- name: transfer cert files from master01 to master03
  synchronize:
    src: "/etc/kubernetes/pki/{{ item }}"
    dest: /etc/kubernetes/pki
    mode: pull
  loop: "{{ KUBERNETES_CERT }}"
  delegate_to: "{{ MASTER03 }}"
  when:
    - ansible_hostname=="k8s-master01"

- name: transfer kubeconfig files from master01 to master02
  synchronize:
    src: "/etc/kubernetes/{{ item }}"
    dest: /etc/kubernetes/
    mode: pull
  loop: "{{ KUBECONFIG }}"
  delegate_to: "{{ MASTER02 }}"
  when:
    - ansible_hostname=="k8s-master01"

- name: transfer kubeconfig files from master01 to master03
  synchronize:
    src: "/etc/kubernetes/{{ item }}"
    dest: /etc/kubernetes/
    mode: pull
  loop: "{{ KUBECONFIG }}"
  delegate_to: "{{ MASTER03 }}"
  when:
    - ansible_hostname=="k8s-master01"
[root@ansible-server kubernetes-master]
...
# NOTE(review): "KUBE_DIRECTROY" is a typo of "DIRECTORY", but the tasks file
# references it by this exact name — rename both together or leave as-is.
KUBE_DIRECTROY:
- /etc/kubernetes/manifests/
- /etc/systemd/system/kubelet.service.d
- /var/lib/kubelet
- /var/log/kubernetes
SERVICE_SUBNET: 10.96.0.0/12
# NOTE(review): 192.168.0.0 is not aligned to a /12 boundary (that network
# would be 192.160.0.0/12) — confirm the intended pod CIDR.
POD_SUBNET: 192.168.0.0/12
# Secondary masters that pull kubeconfig files from master01.
MASTER:
- 172.31.3.102
- 172.31.3.103
HARBOR_DOMAIN: harbor.raymonds.cc
USERNAME: admin
PASSWORD: 123456
PAUSE_VERSION: 3.4.1
CLUSTERDNS: 10.96.0.10
PKI_DIR: /etc/kubernetes/pki
K8S_DIR: /etc/kubernetes
[root@ansible-server kubernetes-master]
[root@ansible-server kubernetes-master]
# kube-apiserver systemd unit (Jinja2 template). The ExecStart command is a
# single line continued with backslashes; etcd endpoints are expanded from
# the etcd inventory group, and the advertise address from the host's
# default IPv4. NOTE(review): --insecure-port was removed in newer
# Kubernetes releases — confirm against the deployed version.
[Unit]
Description=Kubernetes API Server
Documentation=https://github.com/kubernetes/kubernetes
After=network.target

[Service]
ExecStart=/usr/local/bin/kube-apiserver \
--v=2 \
--logtostderr=true \
--allow-privileged=true \
--bind-address=0.0.0.0 \
--secure-port=6443 \
--insecure-port=0 \
--advertise-address={{ ansible_default_ipv4.address }} \
--service-cluster-ip-range={{ SERVICE_SUBNET }} \
--service-node-port-range=30000-32767 \
--etcd-servers={% for i in groups.etcd %}https://{{ hostvars[i].ansible_default_ipv4.address }}:2379{% if not loop.last %},{% endif %}{% endfor %} \
--etcd-cafile=/etc/etcd/ssl/etcd-ca.pem \
--etcd-certfile=/etc/etcd/ssl/etcd.pem \
--etcd-keyfile=/etc/etcd/ssl/etcd-key.pem \
--client-ca-file=/etc/kubernetes/pki/ca.pem \
--tls-cert-file=/etc/kubernetes/pki/apiserver.pem \
--tls-private-key-file=/etc/kubernetes/pki/apiserver-key.pem \
--kubelet-client-certificate=/etc/kubernetes/pki/apiserver.pem \
--kubelet-client-key=/etc/kubernetes/pki/apiserver-key.pem \
--service-account-key-file=/etc/kubernetes/pki/sa.pub \
--service-account-signing-key-file=/etc/kubernetes/pki/sa.key \
--service-account-issuer=https://kubernetes.default.svc.cluster.local \
--kubelet-preferred-address-types=InternalIP,ExternalIP,Hostname \
--enable-admission-plugins=NamespaceLifecycle,LimitRanger,ServiceAccount,DefaultStorageClass,DefaultTolerationSeconds,NodeRestriction,ResourceQuota \
--authorization-mode=Node,RBAC \
--enable-bootstrap-token-auth=true \
--requestheader-client-ca-file=/etc/kubernetes/pki/front-proxy-ca.pem \
--proxy-client-cert-file=/etc/kubernetes/pki/front-proxy-client.pem \
--proxy-client-key-file=/etc/kubernetes/pki/front-proxy-client-key.pem \
--requestheader-allowed-names=aggregator \
--requestheader-group-headers=X-Remote-Group \
--requestheader-extra-headers-prefix=X-Remote-Extra- \
--requestheader-username-headers=X-Remote-User
Restart=on-failure
RestartSec=10s
LimitNOFILE=65535

[Install]
WantedBy=multi-user.target
[root@ansible-server kubernetes-master]
# kube-controller-manager systemd unit (Jinja2 template). Signs kubelet
# CSRs with the cluster CA and allocates pod CIDRs from POD_SUBNET.
# NOTE(review): --address/--pod-eviction-timeout are deprecated or removed
# in newer Kubernetes releases — confirm against the deployed version.
[Unit]
Description=Kubernetes Controller Manager
Documentation=https://github.com/kubernetes/kubernetes
After=network.target

[Service]
ExecStart=/usr/local/bin/kube-controller-manager \
--v=2 \
--logtostderr=true \
--address=127.0.0.1 \
--root-ca-file=/etc/kubernetes/pki/ca.pem \
--cluster-signing-cert-file=/etc/kubernetes/pki/ca.pem \
--cluster-signing-key-file=/etc/kubernetes/pki/ca-key.pem \
--service-account-private-key-file=/etc/kubernetes/pki/sa.key \
--kubeconfig=/etc/kubernetes/controller-manager.kubeconfig \
--leader-elect=true \
--use-service-account-credentials=true \
--node-monitor-grace-period=40s \
--node-monitor-period=5s \
--pod-eviction-timeout=2m0s \
--controllers=*,bootstrapsigner,tokencleaner \
--allocate-node-cidrs=true \
--cluster-cidr={{ POD_SUBNET }} \
--requestheader-client-ca-file=/etc/kubernetes/pki/front-proxy-ca.pem \
--node-cidr-mask-size=24
Restart=always
RestartSec=10s

[Install]
WantedBy=multi-user.target
[root@ansible-server kubernetes-master]
[root@ansible-server kubernetes-master]
# kube-scheduler systemd unit. Uses the scheduler.kubeconfig generated on
# master01; leader election keeps only one active scheduler per cluster.
[Unit]
Description=Kubernetes Scheduler
Documentation=https://github.com/kubernetes/kubernetes
After=network.target

[Service]
ExecStart=/usr/local/bin/kube-scheduler \
--v=2 \
--logtostderr=true \
--address=127.0.0.1 \
--leader-elect=true \
--kubeconfig=/etc/kubernetes/scheduler.kubeconfig
Restart=always
RestartSec=10s

[Install]
WantedBy=multi-user.target
[root@ansible-server kubernetes-master]
[root@ansible-server kubernetes-master]
# Bootstrap token (id.secret = c8ad9c.2e4d610cf3e7426e, matching the token
# used in bootstrap-kubelet.kubeconfig) plus the RBAC bindings that let
# bootstrapping kubelets request and rotate their certificates.
apiVersion: v1
kind: Secret
metadata:
  name: bootstrap-token-c8ad9c
  namespace: kube-system
type: bootstrap.kubernetes.io/token
stringData:
  description: "The default bootstrap token generated by 'kubelet '."
  token-id: c8ad9c
  token-secret: 2e4d610cf3e7426e
  usage-bootstrap-authentication: "true"
  usage-bootstrap-signing: "true"
  auth-extra-groups: system:bootstrappers:default-node-token,system:bootstrappers:worker,system:bootstrappers:ingress
---
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRoleBinding
metadata:
  name: kubelet-bootstrap
roleRef:
  apiGroup: rbac.authorization.k8s.io
  kind: ClusterRole
  name: system:node-bootstrapper
subjects:
  - apiGroup: rbac.authorization.k8s.io
    kind: Group
    name: system:bootstrappers:default-node-token
---
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRoleBinding
metadata:
  name: node-autoapprove-bootstrap
roleRef:
  apiGroup: rbac.authorization.k8s.io
  kind: ClusterRole
  name: system:certificates.k8s.io:certificatesigningrequests:nodeclient
subjects:
  - apiGroup: rbac.authorization.k8s.io
    kind: Group
    name: system:bootstrappers:default-node-token
---
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRoleBinding
metadata:
  name: node-autoapprove-certificate-rotation
roleRef:
  apiGroup: rbac.authorization.k8s.io
  kind: ClusterRole
  name: system:certificates.k8s.io:certificatesigningrequests:selfnodeclient
subjects:
  - apiGroup: rbac.authorization.k8s.io
    kind: Group
    name: system:nodes
---
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRole
metadata:
  annotations:
    rbac.authorization.kubernetes.io/autoupdate: "true"
  labels:
    kubernetes.io/bootstrapping: rbac-defaults
  name: system:kube-apiserver-to-kubelet
rules:
  - apiGroups:
      - ""
    resources:
      - nodes/proxy
      - nodes/stats
      - nodes/log
      - nodes/spec
      - nodes/metrics
    verbs:
      - "*"
---
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRoleBinding
metadata:
  name: system:kube-apiserver
  namespace: ""
roleRef:
  apiGroup: rbac.authorization.k8s.io
  kind: ClusterRole
  name: system:kube-apiserver-to-kubelet
subjects:
  - apiGroup: rbac.authorization.k8s.io
    kind: User
    name: kube-apiserver
[root@ansible-server kubernetes-master]
# kubelet systemd unit. ExecStart is intentionally bare — the real command
# line is supplied by the 10-kubelet.conf drop-in, which clears and
# redefines ExecStart with the argument environment variables.
[Unit]
Description=Kubernetes Kubelet
Documentation=https://github.com/kubernetes/kubernetes
After=docker.service
Requires=docker.service

[Service]
ExecStart=/usr/local/bin/kubelet
Restart=always
StartLimitInterval=0
RestartSec=10

[Install]
WantedBy=multi-user.target
[root@ansible-server kubernetes-master]
[root@ansible-server kubernetes-master]
# systemd drop-in for kubelet.service (Jinja2 template): defines the kubelet
# argument groups as environment variables and replaces the unit's ExecStart.
[Service]
Environment="KUBELET_KUBECONFIG_ARGS=--bootstrap-kubeconfig=/etc/kubernetes/bootstrap-kubelet.kubeconfig --kubeconfig=/etc/kubernetes/kubelet.kubeconfig"
Environment="KUBELET_SYSTEM_ARGS=--network-plugin=cni --cni-conf-dir=/etc/cni/net.d --cni-bin-dir=/opt/cni/bin"
Environment="KUBELET_CONFIG_ARGS=--config=/etc/kubernetes/kubelet-conf.yml --pod-infra-container-image={{ HARBOR_DOMAIN }}/google_containers/pause:{{ PAUSE_VERSION }}"
Environment="KUBELET_EXTRA_ARGS=--node-labels=node.kubernetes.io/node='' "
# The empty ExecStart= clears the base unit's ExecStart before redefining it
# (required by systemd for non-oneshot services).
ExecStart=
ExecStart=/usr/local/bin/kubelet $KUBELET_KUBECONFIG_ARGS $KUBELET_CONFIG_ARGS $KUBELET_SYSTEM_ARGS $KUBELET_EXTRA_ARGS
[root@ansible-server kubernetes-master]
# KubeletConfiguration template: authentication via the cluster CA + webhook,
# systemd cgroup driver, cluster DNS pointed at CLUSTERDNS.
apiVersion: kubelet.config.k8s.io/v1beta1
kind: KubeletConfiguration
address: 0.0.0.0
port: 10250
readOnlyPort: 10255
authentication:
  anonymous:
    enabled: false
  webhook:
    cacheTTL: 2m0s
    enabled: true
  x509:
    clientCAFile: /etc/kubernetes/pki/ca.pem
authorization:
  mode: Webhook
  webhook:
    cacheAuthorizedTTL: 5m0s
    cacheUnauthorizedTTL: 30s
# must match the container runtime's cgroup driver (docker configured for systemd)
cgroupDriver: systemd
cgroupsPerQOS: true
clusterDNS:
  - {{ CLUSTERDNS }}
clusterDomain: cluster.local
containerLogMaxFiles: 5
containerLogMaxSize: 10Mi
contentType: application/vnd.kubernetes.protobuf
cpuCFSQuota: true
cpuManagerPolicy: none
cpuManagerReconcilePeriod: 10s
enableControllerAttachDetach: true
enableDebuggingHandlers: true
enforceNodeAllocatable:
  - pods
eventBurst: 10
eventRecordQPS: 5
evictionHard:
  imagefs.available: 15%
  memory.available: 100Mi
  nodefs.available: 10%
  nodefs.inodesFree: 5%
evictionPressureTransitionPeriod: 5m0s
failSwapOn: true
fileCheckFrequency: 20s
hairpinMode: promiscuous-bridge
healthzBindAddress: 127.0.0.1
healthzPort: 10248
httpCheckFrequency: 20s
imageGCHighThresholdPercent: 85
imageGCLowThresholdPercent: 80
imageMinimumGCAge: 2m0s
iptablesDropBit: 15
iptablesMasqueradeBit: 14
kubeAPIBurst: 10
kubeAPIQPS: 5
makeIPTablesUtilChains: true
maxOpenFiles: 1000000
maxPods: 110
nodeStatusUpdateFrequency: 10s
oomScoreAdj: -999
podPidsLimit: -1
registryBurst: 10
registryPullQPS: 5
resolvConf: /etc/resolv.conf
rotateCertificates: true
runtimeRequestTimeout: 2m0s
serializeImagePulls: true
staticPodPath: /etc/kubernetes/manifests
streamingConnectionIdleTimeout: 4h0m0s
syncFrequency: 1m0s
volumeStatsAggPeriod: 1m0s
[root@ansible-server kubernetes-master]
# KubeProxyConfiguration template: ipvs mode with round-robin scheduling,
# cluster CIDR taken from POD_SUBNET.
apiVersion: kubeproxy.config.k8s.io/v1alpha1
bindAddress: 0.0.0.0
clientConnection:
  acceptContentTypes: ""
  burst: 10
  contentType: application/vnd.kubernetes.protobuf
  kubeconfig: /etc/kubernetes/kube-proxy.kubeconfig
  qps: 5
clusterCIDR: {{ POD_SUBNET }}
configSyncPeriod: 15m0s
conntrack:
  max: null
  maxPerCore: 32768
  min: 131072
  tcpCloseWaitTimeout: 1h0m0s
  tcpEstablishedTimeout: 24h0m0s
enableProfiling: false
healthzBindAddress: 0.0.0.0:10256
hostnameOverride: ""
iptables:
  masqueradeAll: false
  masqueradeBit: 14
  minSyncPeriod: 0s
  syncPeriod: 30s
ipvs:
  masqueradeAll: true
  minSyncPeriod: 5s
  scheduler: "rr"
  syncPeriod: 30s
kind: KubeProxyConfiguration
metricsBindAddress: 127.0.0.1:10249
mode: "ipvs"
nodePortAddresses: null
oomScoreAdj: -999
portRange: ""
udpIdleTimeout: 250ms
[root@ansible-server kubernetes-master]
# kube-proxy systemd unit; all behavior is driven by kube-proxy.conf.
[Unit]
Description=Kubernetes Kube Proxy
Documentation=https://github.com/kubernetes/kubernetes
After=network.target

[Service]
ExecStart=/usr/local/bin/kube-proxy \
--config=/etc/kubernetes/kube-proxy.conf \
--v=2
Restart=always
RestartSec=10s

[Install]
WantedBy=multi-user.target
[root@ansible-server kubernetes-master]
# master_config.yml
# Install and start the control-plane services, bootstrap the kubelet token,
# and configure kubelet/kube-proxy on every master.
- name: create kubernetes directory
  file:
    path: "{{ item }}"
    state: directory
  loop: "{{ KUBE_DIRECTROY }}"
  when:
    - inventory_hostname in groups.master

- name: copy kube-apiserver.service
  template:
    src: service/kube-apiserver.service.j2
    dest: /lib/systemd/system/kube-apiserver.service
  when:
    - inventory_hostname in groups.master

- name: start kube-apiserver
  systemd:
    name: kube-apiserver
    state: started
    enabled: yes
    daemon_reload: yes
  when:
    - inventory_hostname in groups.master

- name: copy kube-controller-manager.service
  template:
    src: service/kube-controller-manager.service.j2
    dest: /lib/systemd/system/kube-controller-manager.service
  when:
    - inventory_hostname in groups.master

- name: start kube-controller-manager
  systemd:
    name: kube-controller-manager
    state: started
    enabled: yes
    daemon_reload: yes
  when:
    - inventory_hostname in groups.master

- name: copy kube-scheduler.service
  copy:
    src: service/kube-scheduler.service
    dest: /lib/systemd/system/
  when:
    - inventory_hostname in groups.master

- name: start kube-scheduler
  systemd:
    name: kube-scheduler
    state: started
    enabled: yes
    daemon_reload: yes
  when:
    - inventory_hostname in groups.master

# The TLS-bootstrap token below must match bootstrap.secret.yaml
# (token-id c8ad9c, token-secret 2e4d610cf3e7426e).
- name: set-cluster bootstrap-kubelet.kubeconfig
  shell:
    cmd: kubectl config set-cluster {{ K8S_CLUSTER }} --certificate-authority=/etc/kubernetes/pki/ca.pem --embed-certs=true --server=https://{{ VIP }}:6443 --kubeconfig=/etc/kubernetes/bootstrap-kubelet.kubeconfig
  when:
    - ansible_hostname=="k8s-master01"

- name: set-credentials bootstrap-kubelet.kubeconfig
  shell:
    cmd: kubectl config set-credentials tls-bootstrap-token-user --token=c8ad9c.2e4d610cf3e7426e --kubeconfig=/etc/kubernetes/bootstrap-kubelet.kubeconfig
  when:
    - ansible_hostname=="k8s-master01"

- name: set-context bootstrap-kubelet.kubeconfig
  shell:
    cmd: kubectl config set-context tls-bootstrap-token-user@{{ K8S_CLUSTER }} --cluster={{ K8S_CLUSTER }} --user=tls-bootstrap-token-user --kubeconfig=/etc/kubernetes/bootstrap-kubelet.kubeconfig
  when:
    - ansible_hostname=="k8s-master01"

- name: use-context bootstrap-kubelet.kubeconfig
  shell:
    cmd: kubectl config use-context tls-bootstrap-token-user@{{ K8S_CLUSTER }} --kubeconfig=/etc/kubernetes/bootstrap-kubelet.kubeconfig
  when:
    - ansible_hostname=="k8s-master01"

- name: create user kube config directory
  file:
    path: /root/.kube
    state: directory
  when:
    - ansible_hostname=="k8s-master01"

- name: copy kubeconfig to user directory
  copy:
    src: /etc/kubernetes/admin.kubeconfig
    dest: /root/.kube/config
    remote_src: yes
  when:
    - ansible_hostname=="k8s-master01"

- name: copy bootstrap.secret.yaml
  copy:
    src: yaml/bootstrap.secret.yaml
    dest: /root
  when:
    - ansible_hostname=="k8s-master01"

- name: create pod by bootstrap.secret.yaml
  shell:
    chdir: /root
    cmd: "kubectl --kubeconfig=/etc/kubernetes/admin.kubeconfig apply -f bootstrap.secret.yaml"
  when:
    - ansible_hostname=="k8s-master01"

# Runs on master01, delegated to each host in MASTER, which pulls the file
# (mode=pull). Typo "mater01" fixed in the task name.
- name: transfer bootstrap-kubelet.kubeconfig file from master01 to master02 master03
  synchronize:
    src: /etc/kubernetes/bootstrap-kubelet.kubeconfig
    dest: /etc/kubernetes/
    mode: pull
  delegate_to: "{{ item }}"
  loop: "{{ MASTER }}"
  when:
    - ansible_hostname=="k8s-master01"

- name: copy kubelet.service to master
  copy:
    src: service/kubelet.service
    dest: /lib/systemd/system/
  when:
    - inventory_hostname in groups.master

# NOTE(review): passing -p on the command line exposes the password in the
# process list and shell history; consider docker login --password-stdin.
- name: docker login
  shell:
    cmd: docker login -u {{ USERNAME }} -p {{ PASSWORD }} {{ HARBOR_DOMAIN }}
  when:
    - ansible_hostname=="k8s-master01"

# Mirror the pause image into the private Harbor registry.
- name: download pause image
  shell: |
    docker pull registry.aliyuncs.com/google_containers/pause:{{ PAUSE_VERSION }}
    docker tag registry.aliyuncs.com/google_containers/pause:{{ PAUSE_VERSION }} {{ HARBOR_DOMAIN }}/google_containers/pause:{{ PAUSE_VERSION }}
    docker rmi registry.aliyuncs.com/google_containers/pause:{{ PAUSE_VERSION }}
    docker push {{ HARBOR_DOMAIN }}/google_containers/pause:{{ PAUSE_VERSION }}
  when:
    - ansible_hostname=="k8s-master01"

- name: copy 10-kubelet.conf to master
  template:
    src: config/10-kubelet.conf.j2
    dest: /etc/systemd/system/kubelet.service.d/10-kubelet.conf
  when:
    - inventory_hostname in groups.master

- name: copy kubelet-conf.yml to master
  template:
    src: config/kubelet-conf.yml.j2
    dest: /etc/kubernetes/kubelet-conf.yml
  when:
    - inventory_hostname in groups.master

- name: start kubelet for master
  systemd:
    name: kubelet
    state: started
    enabled: yes
    daemon_reload: yes
  when:
    - inventory_hostname in groups.master

# kube-proxy authenticates with a service-account token; errors are ignored
# so re-runs don't fail on "already exists".
- name: create serviceaccount
  shell:
    cmd: kubectl -n kube-system create serviceaccount kube-proxy
  ignore_errors: yes
  when:
    - ansible_hostname=="k8s-master01"

- name: create clusterrolebinding
  shell:
    cmd: kubectl create clusterrolebinding system:kube-proxy --clusterrole system:node-proxier --serviceaccount kube-system:kube-proxy
  ignore_errors: yes
  when:
    - ansible_hostname=="k8s-master01"

- name: get SECRET var
  shell:
    cmd: kubectl -n kube-system get sa/kube-proxy --output=jsonpath='{.secrets[0].name}'
  register: SECRET
  when:
    - ansible_hostname=="k8s-master01"

- name: get JWT_TOKEN var
  shell:
    cmd: kubectl -n kube-system get secret/{{ SECRET.stdout }} --output=jsonpath='{.data.token}' | base64 -d
  register: JWT_TOKEN
  when:
    - ansible_hostname=="k8s-master01"

- name: set-cluster kube-proxy.kubeconfig
  shell:
    cmd: kubectl config set-cluster {{ K8S_CLUSTER }} --certificate-authority=/etc/kubernetes/pki/ca.pem --embed-certs=true --server=https://{{ VIP }}:6443 --kubeconfig={{ K8S_DIR }}/kube-proxy.kubeconfig
  when:
    - ansible_hostname=="k8s-master01"

- name: set-credentials kube-proxy.kubeconfig
  shell:
    cmd: kubectl config set-credentials {{ K8S_CLUSTER }} --token={{ JWT_TOKEN.stdout }} --kubeconfig=/etc/kubernetes/kube-proxy.kubeconfig
  when:
    - ansible_hostname=="k8s-master01"

- name: set-context kube-proxy.kubeconfig
  shell:
    cmd: kubectl config set-context {{ K8S_CLUSTER }} --cluster={{ K8S_CLUSTER }} --user=kubernetes --kubeconfig=/etc/kubernetes/kube-proxy.kubeconfig
  when:
    - ansible_hostname=="k8s-master01"

- name: use-context kube-proxy.kubeconfig
  shell:
    cmd: kubectl config use-context {{ K8S_CLUSTER }} --kubeconfig=/etc/kubernetes/kube-proxy.kubeconfig
  when:
    - ansible_hostname=="k8s-master01"

- name: transfer kube-proxy.kubeconfig files from master01 to master02 master03
  synchronize:
    src: /etc/kubernetes/kube-proxy.kubeconfig
    dest: /etc/kubernetes/
    mode: pull
  delegate_to: "{{ item }}"
  loop: "{{ MASTER }}"
  when:
    - ansible_hostname=="k8s-master01"

- name: copy kube-proxy.conf to master
  template:
    src: config/kube-proxy.conf.j2
    dest: /etc/kubernetes/kube-proxy.conf
  when:
    - inventory_hostname in groups.master

- name: copy kube-proxy.service to master
  copy:
    src: service/kube-proxy.service
    dest: /lib/systemd/system/
  when:
    - inventory_hostname in groups.master

- name: start kube-proxy to master
  systemd:
    name: kube-proxy
    state: started
    enabled: yes
    daemon_reload: yes
  when:
    - inventory_hostname in groups.master
[root@ansible-server kubernetes-master]
# Install bash-completion (distro-specific package module) and wire kubectl
# completion into root's shell on master01 only.
- name: install CentOS or Rocky bash-completion tool
  yum:
    name: bash-completion
  when:
    - (ansible_distribution=="CentOS" or ansible_distribution=="Rocky")
    - ansible_hostname=="k8s-master01"
- name: install Ubuntu bash-completion tool
  apt:
    name: bash-completion
    force: yes
  when:
    - ansible_distribution=="Ubuntu"
    - ansible_hostname=="k8s-master01"
- name: source completion bash
  # fixed: the first script line was quoted ("source <(...)"), which made the
  # shell look for a command literally named `source <(kubectl completion
  # bash)`; the task still reported success because only the last line's exit
  # status counts. Process substitution `<(...)` is a bashism, so the script
  # is forced to run under bash instead of the default /bin/sh.
  # NOTE(review): the `echo >> ~/.bashrc` appends a duplicate line on every
  # playbook run; `lineinfile` would make this idempotent.
  shell: |
    source <(kubectl completion bash)
    echo "source <(kubectl completion bash)" >> ~/.bashrc
  args:
    executable: /bin/bash
  when:
    - ansible_hostname=="k8s-master01"
[root@ansible-server kubernetes-master]
# Task-file execution order for the kubernetes-master role: etcd certs first,
# then binaries, cluster certificates, component configuration, and finally
# the kubectl completion helper.
# NOTE(review): `- include:` is the legacy form (removed in newer Ansible);
# `import_tasks`/`include_tasks` is the modern equivalent — confirm the
# target Ansible version before changing.
- include: copy_etcd_cert.yml
- include: copy_kubernetes_file.yml
- include: create_kubernetes_cert.yml
- include: master_config.yml
- include: install_automatic_completion_tool.yml
[root@ansible-server kubernetes-master]
[root@ansible-server ansible]
roles/kubernetes-master/
├── files
│ ├── bin
│ │ ├── kube-apiserver
│ │ ├── kube-controller-manager
│ │ ├── kubectl
│ │ ├── kubelet
│ │ ├── kube-proxy
│ │ └── kube-scheduler
│ ├── cfssl
│ ├── cfssljson
│ ├── service
│ │ ├── kubelet.service
│ │ ├── kube-proxy.service
│ │ └── kube-scheduler.service
│ └── yaml
│ └── bootstrap.secret.yaml
├── tasks
│ ├── copy_etcd_cert.yml
│ ├── copy_kubernetes_file.yml
│ ├── create_kubernetes_cert.yml
│ ├── install_automatic_completion_tool.yml
│ ├── main.yml
│ └── master_config.yml
├── templates
│ ├── config
│ │ ├── 10-kubelet.conf.j2
│ │ ├── kubelet-conf.yml.j2
│ │ └── kube-proxy.conf.j2
│ ├── pki
│ │ ├── admin-csr.json.j2
│ │ ├── apiserver-csr.json.j2
│ │ ├── ca-config.json.j2
│ │ ├── ca-csr.json.j2
│ │ ├── front-proxy-ca-csr.json.j2
│ │ ├── front-proxy-client-csr.json.j2
│ │ ├── manager-csr.json.j2
│ │ └── scheduler-csr.json.j2
│ └── service
│ ├── kube-apiserver.service.j2
│ └── kube-controller-manager.service.j2
└── vars
└── main.yml
10 directories, 32 files
[root@ansible-server ansible]
---
# Apply the kubernetes-master role. The etcd group is included because the
# etcd-certificate copy tasks run conditionally on k8s-etcd01 (delegated
# pushes to the masters).
- hosts: master:etcd
roles:
- role: kubernetes-master
[root@ansible-server ansible]
12.2 验证master
[root@k8s-master01 ~]
NAME STATUS ROLES AGE VERSION
k8s-master01.example.local NotReady <none> 7s v1.21.8
k8s-master02.example.local NotReady <none> 7s v1.21.8
k8s-master03.example.local NotReady <none> 7s v1.21.8
13.部署node
13.1 安装node组件
[root@ansible-server ansible]
[root@ansible-server ansible]
[root@ansible-server kubernetes-node]
files tasks templates vars
[root@ansible-server kubernetes-node]
[root@ansible-server kubernetes-node]
[root@ansible-server kubernetes-node]
[root@ansible-server kubernetes-node]
kubelet kube-proxy
[root@ansible-server kubernetes-node]
# Install the node-side Kubernetes binaries and prepare the CNI plugin
# directory on every host in the node group.
- name: copy kubernetes files to node
  copy:
    src: "bin/{{ item }}"
    dest: /usr/local/bin/
    mode: 0755
  loop:
    - kubelet
    - kube-proxy
  when:
    - inventory_hostname in groups.node
- name: create /opt/cni/bin directory
  file:
    path: /opt/cni/bin
    state: directory
  when:
    - inventory_hostname in groups.node
[root@ansible-server kubernetes-node]
# Etcd certificate files to distribute to the nodes, and the node IPs used
# as synchronize/delegate targets in copy_etcd_cert.yaml.
ETCD_CERT:
- etcd-ca-key.pem
- etcd-ca.pem
- etcd-key.pem
- etcd.pem
NODE01: 172.31.3.111
NODE02: 172.31.3.112
NODE03: 172.31.3.113
[root@ansible-server kubernetes-node]
# Copy the etcd CA/client certificates from k8s-etcd01 to every node and
# expose them under /etc/kubernetes/pki/etcd via symlinks.
- name: create /etc/etcd/ssl directory for node
file:
path: /etc/etcd/ssl
state: directory
when:
- inventory_hostname in groups.node
# synchronize with mode=pull and delegate_to=NODE0x runs rsync on that node,
# pulling each certificate from this task's target host (k8s-etcd01).
# NOTE(review): the three near-identical tasks below differ only in the
# delegate target; a nested loop over the node IPs could collapse them.
- name: transfer etcd-ca-key.pem file from etcd01 to node01
synchronize:
src: "/etc/etcd/ssl/{{ item }}"
dest: /etc/etcd/ssl/
mode: pull
loop:
"{{ ETCD_CERT }}"
delegate_to: "{{ NODE01 }}"
when:
- ansible_hostname=="k8s-etcd01"
- name: transfer etcd-ca-key.pem file from etcd01 to node02
synchronize:
src: "/etc/etcd/ssl/{{ item }}"
dest: /etc/etcd/ssl/
mode: pull
loop:
"{{ ETCD_CERT }}"
delegate_to: "{{ NODE02 }}"
when:
- ansible_hostname=="k8s-etcd01"
- name: transfer etcd-ca-key.pem file from etcd01 to node03
synchronize:
src: "/etc/etcd/ssl/{{ item }}"
dest: /etc/etcd/ssl/
mode: pull
loop:
"{{ ETCD_CERT }}"
delegate_to: "{{ NODE03 }}"
when:
- ansible_hostname=="k8s-etcd01"
- name: create /etc/kubernetes/pki/etcd directory
file:
path: /etc/kubernetes/pki/etcd
state: directory
when:
- inventory_hostname in groups.node
# symlink (not copy) so certificate renewals under /etc/etcd/ssl are picked
# up automatically by anything reading /etc/kubernetes/pki/etcd
- name: link etcd_ssl to kubernetes pki
file:
src: "/etc/etcd/ssl/{{ item }}"
dest: "/etc/kubernetes/pki/etcd/{{ item }}"
state: link
loop:
"{{ ETCD_CERT }}"
when:
- inventory_hostname in groups.node
[root@ansible-server kubernetes-node]
...
# Node IPs used as delegate_to targets when pulling files from master01
# (see copy_kubernetes_cert.yml and node_config.yml).
NODE:
- 172.31.3.111
- 172.31.3.112
- 172.31.3.113
[root@ansible-server kubernetes-node]
# Distribute the Kubernetes CA material and the bootstrap kubeconfig from
# master01 to every node. synchronize in pull mode runs rsync on the
# delegated node, fetching the file from this task's target host (master01).
# fixed: the transfer task names said "mater01" (typo) instead of "master01".
- name: create /etc/kubernetes/pki directory to node
  file:
    path: /etc/kubernetes/pki
    state: directory
  when:
    - inventory_hostname in groups.node
- name: transfer ca.pem file from master01 to node
  synchronize:
    src: /etc/kubernetes/pki/ca.pem
    dest: /etc/kubernetes/pki/
    mode: pull
  delegate_to: "{{ item }}"
  loop:
    "{{ NODE }}"
  when:
    - ansible_hostname=="k8s-master01"
- name: transfer ca-key.pem file from master01 to node
  synchronize:
    src: /etc/kubernetes/pki/ca-key.pem
    dest: /etc/kubernetes/pki/
    mode: pull
  delegate_to: "{{ item }}"
  loop:
    "{{ NODE }}"
  when:
    - ansible_hostname=="k8s-master01"
- name: transfer front-proxy-ca.pem file from master01 to node
  synchronize:
    src: /etc/kubernetes/pki/front-proxy-ca.pem
    dest: /etc/kubernetes/pki/
    mode: pull
  delegate_to: "{{ item }}"
  loop:
    "{{ NODE }}"
  when:
    - ansible_hostname=="k8s-master01"
- name: transfer bootstrap-kubelet.kubeconfig file from master01 to node
  synchronize:
    src: /etc/kubernetes/bootstrap-kubelet.kubeconfig
    dest: /etc/kubernetes/
    mode: pull
  delegate_to: "{{ item }}"
  loop:
    "{{ NODE }}"
  when:
    - ansible_hostname=="k8s-master01"
[root@ansible-server kubernetes-node]
...
# Directories created on every node, plus runtime settings consumed by the
# kubelet/kube-proxy templates.
# NOTE(review): the key KUBE_DIRECTROY is misspelled ("DIRECTORY") but is
# referenced with the same spelling in tasks/node_config.yml — rename both
# together or not at all.
KUBE_DIRECTROY:
- /etc/kubernetes/manifests/
- /etc/systemd/system/kubelet.service.d
- /var/lib/kubelet
- /var/log/kubernetes
HARBOR_DOMAIN: harbor.raymonds.cc
# quoted so YAML cannot reinterpret the image tag as a float
# (an unquoted 3.10, for example, would render as "3.1")
PAUSE_VERSION: "3.2"
CLUSTERDNS: 10.96.0.10
POD_SUBNET: 192.168.0.0/12
[root@ansible-server kubernetes-node]
[root@ansible-server kubernetes-node]
# Base systemd unit for kubelet. The 10-kubelet.conf drop-in clears and
# redefines ExecStart with the full argument list.
[Unit]
Description=Kubernetes Kubelet
Documentation=https://github.com/kubernetes/kubernetes
# docker is the container runtime in this deployment
After=docker.service
Requires=docker.service
[Service]
# intentionally argument-less: overridden by the drop-in's ExecStart
ExecStart=/usr/local/bin/kubelet
Restart=always
# 0 disables start-rate limiting so kubelet restarts are always retried
StartLimitInterval=0
RestartSec=10
[Install]
WantedBy=multi-user.target
[root@ansible-server kubernetes-node]
[root@ansible-server kubernetes-node]
# systemd drop-in rendered by Ansible (Jinja2: HARBOR_DOMAIN, PAUSE_VERSION);
# extends kubelet.service with kubeconfig, CNI, config-file and label args.
[Service]
Environment="KUBELET_KUBECONFIG_ARGS=--bootstrap-kubeconfig=/etc/kubernetes/bootstrap-kubelet.kubeconfig --kubeconfig=/etc/kubernetes/kubelet.kubeconfig"
Environment="KUBELET_SYSTEM_ARGS=--network-plugin=cni --cni-conf-dir=/etc/cni/net.d --cni-bin-dir=/opt/cni/bin"
Environment="KUBELET_CONFIG_ARGS=--config=/etc/kubernetes/kubelet-conf.yml --pod-infra-container-image={{ HARBOR_DOMAIN }}/google_containers/pause:{{ PAUSE_VERSION }}"
Environment="KUBELET_EXTRA_ARGS=--node-labels=node.kubernetes.io/node='' "
# the empty ExecStart= clears the base unit's ExecStart before redefining it
ExecStart=
ExecStart=/usr/local/bin/kubelet $KUBELET_KUBECONFIG_ARGS $KUBELET_CONFIG_ARGS $KUBELET_SYSTEM_ARGS $KUBELET_EXTRA_ARGS
[root@ansible-server kubernetes-node]
# KubeletConfiguration template rendered by Ansible (Jinja2: CLUSTERDNS).
apiVersion: kubelet.config.k8s.io/v1beta1
kind: KubeletConfiguration
address: 0.0.0.0
port: 10250
# NOTE(review): 10255 serves an unauthenticated read-only API; set to 0 to
# disable if nothing scrapes it — confirm before changing.
readOnlyPort: 10255
authentication:
anonymous:
enabled: false
webhook:
cacheTTL: 2m0s
enabled: true
x509:
clientCAFile: /etc/kubernetes/pki/ca.pem
authorization:
mode: Webhook
webhook:
cacheAuthorizedTTL: 5m0s
cacheUnauthorizedTTL: 30s
# NOTE(review): must match the container runtime's cgroup driver — confirm
# docker is configured with native.cgroupdriver=systemd on the nodes.
cgroupDriver: systemd
cgroupsPerQOS: true
clusterDNS:
- {{ CLUSTERDNS }}
clusterDomain: cluster.local
containerLogMaxFiles: 5
containerLogMaxSize: 10Mi
contentType: application/vnd.kubernetes.protobuf
cpuCFSQuota: true
cpuManagerPolicy: none
cpuManagerReconcilePeriod: 10s
enableControllerAttachDetach: true
enableDebuggingHandlers: true
enforceNodeAllocatable:
- pods
eventBurst: 10
eventRecordQPS: 5
evictionHard:
imagefs.available: 15%
memory.available: 100Mi
nodefs.available: 10%
nodefs.inodesFree: 5%
evictionPressureTransitionPeriod: 5m0s
failSwapOn: true
fileCheckFrequency: 20s
hairpinMode: promiscuous-bridge
healthzBindAddress: 127.0.0.1
healthzPort: 10248
httpCheckFrequency: 20s
imageGCHighThresholdPercent: 85
imageGCLowThresholdPercent: 80
imageMinimumGCAge: 2m0s
iptablesDropBit: 15
iptablesMasqueradeBit: 14
kubeAPIBurst: 10
kubeAPIQPS: 5
makeIPTablesUtilChains: true
maxOpenFiles: 1000000
maxPods: 110
nodeStatusUpdateFrequency: 10s
oomScoreAdj: -999
podPidsLimit: -1
registryBurst: 10
registryPullQPS: 5
resolvConf: /etc/resolv.conf
rotateCertificates: true
runtimeRequestTimeout: 2m0s
serializeImagePulls: true
staticPodPath: /etc/kubernetes/manifests
streamingConnectionIdleTimeout: 4h0m0s
syncFrequency: 1m0s
volumeStatsAggPeriod: 1m0s
[root@ansible-server kubernetes-node]
# KubeProxyConfiguration template rendered by Ansible (Jinja2: POD_SUBNET).
apiVersion: kubeproxy.config.k8s.io/v1alpha1
bindAddress: 0.0.0.0
clientConnection:
acceptContentTypes: ""
burst: 10
contentType: application/vnd.kubernetes.protobuf
kubeconfig: /etc/kubernetes/kube-proxy.kubeconfig
qps: 5
# templated pod CIDR; rendered as e.g. 192.168.0.0/12
clusterCIDR: {{ POD_SUBNET }}
configSyncPeriod: 15m0s
conntrack:
max: null
maxPerCore: 32768
min: 131072
tcpCloseWaitTimeout: 1h0m0s
tcpEstablishedTimeout: 24h0m0s
enableProfiling: false
healthzBindAddress: 0.0.0.0:10256
hostnameOverride: ""
# iptables settings are retained but inert: mode below selects ipvs
iptables:
masqueradeAll: false
masqueradeBit: 14
minSyncPeriod: 0s
syncPeriod: 30s
ipvs:
masqueradeAll: true
minSyncPeriod: 5s
# round-robin scheduling for service endpoints
scheduler: "rr"
syncPeriod: 30s
kind: KubeProxyConfiguration
metricsBindAddress: 127.0.0.1:10249
mode: "ipvs"
nodePortAddresses: null
oomScoreAdj: -999
portRange: ""
udpIdleTimeout: 250ms
[root@ansible-server kubernetes-node]
# systemd unit for kube-proxy; all runtime settings come from the rendered
# /etc/kubernetes/kube-proxy.conf.
[Unit]
Description=Kubernetes Kube Proxy
Documentation=https://github.com/kubernetes/kubernetes
After=network.target
[Service]
ExecStart=/usr/local/bin/kube-proxy \
--config=/etc/kubernetes/kube-proxy.conf \
--v=2
Restart=always
RestartSec=10s
[Install]
WantedBy=multi-user.target
[root@ansible-server kubernetes-node]
# Configure and start kubelet and kube-proxy on every node. Order matters:
# directories and configs must exist before each service is started.
- name: create kubernetes directory to node
file:
path: "{{ item }}"
state: directory
loop:
"{{ KUBE_DIRECTROY }}"
when:
- inventory_hostname in groups.node
- name: copy kubelet.service to node
copy:
src: service/kubelet.service
dest: /lib/systemd/system/
when:
- inventory_hostname in groups.node
# drop-in extending kubelet.service (directory created by the first task)
- name: copy 10-kubelet.conf to node
template:
src: config/10-kubelet.conf.j2
dest: /etc/systemd/system/kubelet.service.d/10-kubelet.conf
when:
- inventory_hostname in groups.node
- name: copy kubelet-conf.yml to node
template:
src: config/kubelet-conf.yml.j2
dest: /etc/kubernetes/kubelet-conf.yml
when:
- inventory_hostname in groups.node
- name: start kubelet for node
systemd:
name: kubelet
state: started
enabled: yes
daemon_reload: yes
when:
- inventory_hostname in groups.node
# pull mode: rsync runs on each delegated node, fetching the kubeconfig
# generated earlier on master01
- name: transfer kube-proxy.kubeconfig files from master01 to node
synchronize:
src: /etc/kubernetes/kube-proxy.kubeconfig
dest: /etc/kubernetes/
mode: pull
delegate_to: "{{ item }}"
loop:
"{{ NODE }}"
when:
- ansible_hostname=="k8s-master01"
- name: copy kube-proxy.conf to node
template:
src: config/kube-proxy.conf.j2
dest: /etc/kubernetes/kube-proxy.conf
when:
- inventory_hostname in groups.node
- name: copy kube-proxy.service to node
copy:
src: service/kube-proxy.service
dest: /lib/systemd/system/
when:
- inventory_hostname in groups.node
- name: start kube-proxy to node
systemd:
name: kube-proxy
state: started
enabled: yes
daemon_reload: yes
when:
- inventory_hostname in groups.node
[root@ansible-server kubernetes-node]
# Task-file execution order for the kubernetes-node role.
# NOTE(review): two includes use the .yaml extension and two use .yml; the
# names match the actual files in the role tree, but unifying the extensions
# would be cleaner.
- include: copy_kubernetes_file.yaml
- include: copy_etcd_cert.yaml
- include: copy_kubernetes_cert.yml
- include: node_config.yml
[root@ansible-server kubernetes-node]
[root@ansible-server ansible]
roles/kubernetes-node/
├── files
│ ├── bin
│ │ ├── kubelet
│ │ └── kube-proxy
│ └── service
│ ├── kubelet.service
│ └── kube-proxy.service
├── tasks
│ ├── copy_etcd_cert.yaml
│ ├── copy_kubernetes_cert.yml
│ ├── copy_kubernetes_file.yaml
│ ├── main.yml
│ └── node_config.yml
├── templates
│ └── config
│ ├── 10-kubelet.conf.j2
│ ├── kubelet-conf.yml.j2
│ └── kube-proxy.conf.j2
└── vars
└── main.yml
7 directories, 13 files
[root@ansible-server ansible]
---
- hosts: master:node:etcd
roles:
- role: kubernetes-node
[root@ansible-server ansible]
13.2 验证node
[root@k8s-master01 ~]
NAME STATUS ROLES AGE VERSION
k8s-master01.example.local NotReady <none> 3m8s v1.21.8
k8s-master02.example.local NotReady <none> 3m8s v1.21.8
k8s-master03.example.local NotReady <none> 3m8s v1.21.8
k8s-node01.example.local NotReady <none> 1s v1.21.8
k8s-node02.example.local NotReady <none> 1s v1.21.8
k8s-node03.example.local NotReady <none> 1s v1.21.8