a36.Ansible Production Case Study -- Installing Kubernetes v1.22 from Binary Packages -- Cluster Deployment (Part 1)


Source code download: github.com/raymond9999…

1.High-Availability Kubernetes Cluster Plan

| Role | Hostname | Specs | IP address | Installed software |
| --- | --- | --- | --- | --- |
| ansible | ansible-server.example.local | 2C2G | 172.31.3.100 | ansible |
| master1 | k8s-master01.example.local | 2C4G | 172.31.3.101 | chrony-client, docker, kube-controller-manager, kube-scheduler, kube-apiserver, kubelet, kube-proxy, kubectl |
| master2 | k8s-master02.example.local | 2C4G | 172.31.3.102 | chrony-client, docker, kube-controller-manager, kube-scheduler, kube-apiserver, kubelet, kube-proxy, kubectl |
| master3 | k8s-master03.example.local | 2C4G | 172.31.3.103 | chrony-client, docker, kube-controller-manager, kube-scheduler, kube-apiserver, kubelet, kube-proxy, kubectl |
| ha1 | k8s-ha01.example.local | 2C2G | 172.31.3.104, 172.31.3.188 (VIP) | chrony-server, haproxy, keepalived |
| ha2 | k8s-ha02.example.local | 2C2G | 172.31.3.105 | chrony-server, haproxy, keepalived |
| harbor1 | k8s-harbor01.example.local | 2C2G | 172.31.3.106 | chrony-client, docker, docker-compose, harbor |
| harbor2 | k8s-harbor02.example.local | 2C2G | 172.31.3.107 | chrony-client, docker, docker-compose, harbor |
| etcd1 | k8s-etcd01.example.local | 2C2G | 172.31.3.108 | chrony-client, docker, etcd |
| etcd2 | k8s-etcd02.example.local | 2C2G | 172.31.3.109 | chrony-client, docker, etcd |
| etcd3 | k8s-etcd03.example.local | 2C2G | 172.31.3.110 | chrony-client, docker, etcd |
| node1 | k8s-node01.example.local | 2C4G | 172.31.3.111 | chrony-client, docker, kubelet, kube-proxy |
| node2 | k8s-node02.example.local | 2C4G | 172.31.3.112 | chrony-client, docker, kubelet, kube-proxy |
| node3 | k8s-node03.example.local | 2C4G | 172.31.3.113 | chrony-client, docker, kubelet, kube-proxy |

Software version information and Pod/Service CIDR plan:

| Configuration | Notes |
| --- | --- |
| Supported OS versions | CentOS 7.9/stream 8, Rocky 8, Ubuntu 18.04/20.04 |
| Docker version | 20.10.14 |
| Kubernetes version | 1.22.8 |
| Pod CIDR | 192.168.0.0/12 |
| Service CIDR | 10.96.0.0/12 |

2.Install and Configure ansible

2.1 Install ansible

#CentOS
[root@ansible-server ~]# yum -y install ansible

[root@ansible-server ~]# ansible --version
ansible 2.9.25
  config file = /data/ansible/ansible.cfg
  configured module search path = [u'/root/.ansible/plugins/modules', u'/usr/share/ansible/plugins/modules']
  ansible python module location = /usr/lib/python2.7/site-packages/ansible
  executable location = /usr/bin/ansible
  python version = 2.7.5 (default, Oct 14 2020, 14:45:30) [GCC 4.8.5 20150623 (Red Hat 4.8.5-44)]

#Install the latest ansible on Ubuntu 18.04
root@ubuntu1804:~# apt update

root@ubuntu1804:~# apt -y install software-properties-common

root@ubuntu1804:~# apt-add-repository --yes --update ppa:ansible/ansible

root@ubuntu1804:~# apt -y install ansible
root@ubuntu1804:~# ansible --version
ansible 2.9.27
  config file = /etc/ansible/ansible.cfg
  configured module search path = [u'/root/.ansible/plugins/modules', u'/usr/share/ansible/plugins/modules']
  ansible python module location = /usr/lib/python2.7/dist-packages/ansible
  executable location = /usr/bin/ansible
  python version = 2.7.17 (default, Feb 27 2021, 15:10:58) [GCC 7.5.0]

#Ubuntu 20.04 install
[root@ubuntu ~]# apt -y install ansible

2.2 Configure ansible

[root@ansible-server ~]# mkdir /data/ansible
[root@ansible-server ~]# cd /data/ansible

[root@ansible-server ansible]# vim ansible.cfg
[defaults]
inventory      = ./inventory
forks          = 10
roles_path    = ./roles
remote_user = root

#Set the IPs below according to your own k8s cluster host plan
[root@ansible-server ansible]# vim inventory 
[master]
172.31.3.101 hname=k8s-master01
172.31.3.102 hname=k8s-master02
172.31.3.103 hname=k8s-master03

[ha]
172.31.3.104 hname=k8s-ha01
172.31.3.105 hname=k8s-ha02

[harbor]
172.31.3.106 hname=k8s-harbor01
172.31.3.107 hname=k8s-harbor02

[etcd]
172.31.3.108 hname=k8s-etcd01
172.31.3.109 hname=k8s-etcd02
172.31.3.110 hname=k8s-etcd03

[node]
172.31.3.111 hname=k8s-node01
172.31.3.112 hname=k8s-node02
172.31.3.113 hname=k8s-node03

[all:vars]
domain=example.local

[k8s_cluster:children]
master
node

[chrony_server:children]
ha

[chrony_client:children]
master
node
harbor
etcd

[keepalives_master]
172.31.3.104

[keepalives_backup]
172.31.3.105

[haproxy:children]
ha

[master01]
172.31.3.101
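
Before touching any remote host, it can help to confirm that the inventory parses and that the child groups nest the way you expect. A quick check from the ansible server (assuming the ansible.cfg and inventory shown above):

[root@ansible-server ansible]# ansible-inventory --graph

The output should show @k8s_cluster containing @master and @node, @chrony_client containing master, node, harbor and etcd, and so on; if a group is missing or empty, fix the inventory before running any playbook.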

3.Set the Client NIC Name and IP

#Rocky 8 and CentOS setup
[root@172 ~]# bash reset.sh 

************************************************************
*                Initialization Script Menu                *
* 1.Disable SELinux            12.Change IP and gateway    *
* 2.Stop firewall              13.Set hostname             *
* 3.Optimize SSH               14.Set PS1 and env vars     *
* 4.Set system aliases         15.Disable SWAP             *
* 5.Apply options 1-4          16.Tune kernel parameters   *
* 6.Configure vimrc            17.Tune resource limits     *
* 7.Configure package repos    18.Ubuntu: enable root login*
* 8.Minimal-install packages   19.Ubuntu: remove unused pkgs*
* 9.Install mail service       20.Reboot system            *
* 10.Change SSH port           21.Exit                     *
* 11.Rename network interface                              *
************************************************************

Please select an option (1-21): 11
Rocky 8.5 NIC name has been changed; reboot the system for the change to take effect!

************************************************************
*                Initialization Script Menu                *
* 1.Disable SELinux            12.Change IP and gateway    *
* 2.Stop firewall              13.Set hostname             *
* 3.Optimize SSH               14.Set PS1 and env vars     *
* 4.Set system aliases         15.Disable SWAP             *
* 5.Apply options 1-4          16.Tune kernel parameters   *
* 6.Configure vimrc            17.Tune resource limits     *
* 7.Configure package repos    18.Ubuntu: enable root login*
* 8.Minimal-install packages   19.Ubuntu: remove unused pkgs*
* 9.Install mail service       20.Reboot system            *
* 10.Change SSH port           21.Exit                     *
* 11.Rename network interface                              *
************************************************************

Please select an option (1-21): 12
Enter the IP address: 172.31.0.101
IP 172.31.0.101  available!
Enter the subnet mask length: 21
Enter the gateway address: 172.31.0.2
IP 172.31.0.2  available!
Rocky 8.5 IP address and gateway have been changed; reboot the system for the changes to take effect!

************************************************************
*                Initialization Script Menu                *
* 1.Disable SELinux            12.Change IP and gateway    *
* 2.Stop firewall              13.Set hostname             *
* 3.Optimize SSH               14.Set PS1 and env vars     *
* 4.Set system aliases         15.Disable SWAP             *
* 5.Apply options 1-4          16.Tune kernel parameters   *
* 6.Configure vimrc            17.Tune resource limits     *
* 7.Configure package repos    18.Ubuntu: enable root login*
* 8.Minimal-install packages   19.Ubuntu: remove unused pkgs*
* 9.Install mail service       20.Reboot system            *
* 10.Change SSH port           21.Exit                     *
* 11.Rename network interface                              *
************************************************************

Please select an option (1-21): 21

#Ubuntu setup
[C:\~]$ ssh raymond@172.31.7.3


Connecting to 172.31.7.3:22...
Connection established.
To escape to local shell, press 'Ctrl+Alt+]'.

Welcome to Ubuntu 18.04.6 LTS (GNU/Linux 4.15.0-156-generic x86_64)

 * Documentation:  https://help.ubuntu.com
 * Management:     https://landscape.canonical.com
 * Support:        https://ubuntu.com/advantage

  System information as of Mon Dec 27 13:56:42 CST 2021

  System load:  0.17              Processes:            193
  Usage of /:   2.1% of 91.17GB   Users logged in:      1
  Memory usage: 10%               IP address for ens33: 172.31.7.3
  Swap usage:   0%

 * Super-optimized for small spaces - read how we shrank the memory
   footprint of MicroK8s to make it the smallest full K8s around.

   https://ubuntu.com/blog/microk8s-memory-optimisation

19 updates can be applied immediately.
18 of these updates are standard security updates.
To see these additional updates run: apt list --upgradable

New release '20.04.3 LTS' available.
Run 'do-release-upgrade' to upgrade to it.


Last login: Mon Dec 27 13:56:31 2021
/usr/bin/xauth:  file /home/raymond/.Xauthority does not exist
To run a command as administrator (user "root"), use "sudo <command>".
See "man sudo_root" for details.

raymond@ubuntu1804:~$ bash reset.sh 

************************************************************
*                Initialization Script Menu                *
* 1.Disable SELinux            12.Change IP and gateway    *
* 2.Stop firewall              13.Set hostname             *
* 3.Optimize SSH               14.Set PS1 and env vars     *
* 4.Set system aliases         15.Disable SWAP             *
* 5.Apply options 1-4          16.Tune kernel parameters   *
* 6.Configure vimrc            17.Tune resource limits     *
* 7.Configure package repos    18.Ubuntu: enable root login*
* 8.Minimal-install packages   19.Ubuntu: remove unused pkgs*
* 9.Install mail service       20.Reboot system            *
* 10.Change SSH port           21.Exit                     *
* 11.Rename network interface                              *
************************************************************

Please select an option (1-21): 18
Enter the password: 123456
[sudo] password for raymond: Enter new UNIX password: Retype new UNIX password: passwd: password updated successfully
Ubuntu 18.04 root login has been configured; log in again for it to take effect!

************************************************************
*                Initialization Script Menu                *
* 1.Disable SELinux            12.Change IP and gateway    *
* 2.Stop firewall              13.Set hostname             *
* 3.Optimize SSH               14.Set PS1 and env vars     *
* 4.Set system aliases         15.Disable SWAP             *
* 5.Apply options 1-4          16.Tune kernel parameters   *
* 6.Configure vimrc            17.Tune resource limits     *
* 7.Configure package repos    18.Ubuntu: enable root login*
* 8.Minimal-install packages   19.Ubuntu: remove unused pkgs*
* 9.Install mail service       20.Reboot system            *
* 10.Change SSH port           21.Exit                     *
* 11.Rename network interface                              *
************************************************************

Please select an option (1-21): 21
raymond@ubuntu1804:~$ exit
logout

Connection closed.

Disconnected from remote host(172.31.7.3:22) at 13:57:16.

Type `help' to learn how to use Xshell prompt.

[C:\~]$ ssh root@172.31.7.3


Connecting to 172.31.7.3:22...
Connection established.
To escape to local shell, press 'Ctrl+Alt+]'.

Welcome to Ubuntu 18.04.6 LTS (GNU/Linux 4.15.0-156-generic x86_64)

 * Documentation:  https://help.ubuntu.com
 * Management:     https://landscape.canonical.com
 * Support:        https://ubuntu.com/advantage

  System information as of Mon Dec 27 13:57:47 CST 2021

  System load:  0.06              Processes:            199
  Usage of /:   2.1% of 91.17GB   Users logged in:      1
  Memory usage: 11%               IP address for ens33: 172.31.7.3
  Swap usage:   0%

 * Super-optimized for small spaces - read how we shrank the memory
   footprint of MicroK8s to make it the smallest full K8s around.

   https://ubuntu.com/blog/microk8s-memory-optimisation

19 updates can be applied immediately.
18 of these updates are standard security updates.
To see these additional updates run: apt list --upgradable

New release '20.04.3 LTS' available.
Run 'do-release-upgrade' to upgrade to it.



The programs included with the Ubuntu system are free software;
the exact distribution terms for each program are described in the
individual files in /usr/share/doc/*/copyright.

Ubuntu comes with ABSOLUTELY NO WARRANTY, to the extent permitted by
applicable law.

/usr/bin/xauth:  file /root/.Xauthority does not exist
root@ubuntu1804:~# mv /home/raymond/reset.sh .
root@ubuntu1804:~# bash reset.sh 

************************************************************
*                Initialization Script Menu                *
* 1.Disable SELinux            12.Change IP and gateway    *
* 2.Stop firewall              13.Set hostname             *
* 3.Optimize SSH               14.Set PS1 and env vars     *
* 4.Set system aliases         15.Disable SWAP             *
* 5.Apply options 1-4          16.Tune kernel parameters   *
* 6.Configure vimrc            17.Tune resource limits     *
* 7.Configure package repos    18.Ubuntu: enable root login*
* 8.Minimal-install packages   19.Ubuntu: remove unused pkgs*
* 9.Install mail service       20.Reboot system            *
* 10.Change SSH port           21.Exit                     *
* 11.Rename network interface                              *
************************************************************

Please select an option (1-21): 11
Ubuntu 18.04 NIC name has been changed; reboot the system for the change to take effect!

************************************************************
*                Initialization Script Menu                *
* 1.Disable SELinux            12.Change IP and gateway    *
* 2.Stop firewall              13.Set hostname             *
* 3.Optimize SSH               14.Set PS1 and env vars     *
* 4.Set system aliases         15.Disable SWAP             *
* 5.Apply options 1-4          16.Tune kernel parameters   *
* 6.Configure vimrc            17.Tune resource limits     *
* 7.Configure package repos    18.Ubuntu: enable root login*
* 8.Minimal-install packages   19.Ubuntu: remove unused pkgs*
* 9.Install mail service       20.Reboot system            *
* 10.Change SSH port           21.Exit                     *
* 11.Rename network interface                              *
************************************************************

Please select an option (1-21): 12
Enter the IP address: 172.31.0.103
IP 172.31.0.103  available!
Enter the subnet mask length: 21
Enter the gateway address: 172.31.0.2
IP 172.31.0.2  available!
Ubuntu 18.04 IP address and gateway have been changed; reboot the system for the changes to take effect!

************************************************************
*                Initialization Script Menu                *
* 1.Disable SELinux            12.Change IP and gateway    *
* 2.Stop firewall              13.Set hostname             *
* 3.Optimize SSH               14.Set PS1 and env vars     *
* 4.Set system aliases         15.Disable SWAP             *
* 5.Apply options 1-4          16.Tune kernel parameters   *
* 6.Configure vimrc            17.Tune resource limits     *
* 7.Configure package repos    18.Ubuntu: enable root login*
* 8.Minimal-install packages   19.Ubuntu: remove unused pkgs*
* 9.Install mail service       20.Reboot system            *
* 10.Change SSH port           21.Exit                     *
* 11.Rename network interface                              *
************************************************************

Please select an option (1-21): 21

4.Script for Key-Based SSH Authentication

#Set the IPs below according to your own k8s cluster host plan
[root@ansible-server ansible]# cat ssh_key.sh 
#!/bin/bash
#
#**********************************************************************************************
#Author:        Raymond
#QQ:            88563128
#Date:          2021-12-20
#FileName:      ssh_key.sh
#URL:           raymond.blog.csdn.net
#Description:   ssh_key for CentOS 7/8 & Ubuntu 18.04/20.04 & Rocky 8
#Copyright (C): 2021 All rights reserved
#*********************************************************************************************
COLOR="echo -e \\033[01;31m"
END='\033[0m'

NET_NAME=`ip addr |awk -F"[: ]" '/^2: e.*/{print $3}'`
IP=`ip addr show ${NET_NAME}| awk -F" +|/" '/global/{print $3}'`
export SSHPASS=123456
HOSTS="
172.31.3.101
172.31.3.102
172.31.3.103
172.31.3.104
172.31.3.105
172.31.3.106
172.31.3.107
172.31.3.108
172.31.3.109
172.31.3.110
172.31.3.111
172.31.3.112
172.31.3.113"

os(){
    OS_ID=`sed -rn '/^NAME=/s@.*="([[:alpha:]]+).*"$@\1@p' /etc/os-release`
}

ssh_key_push(){
    rm -f ~/.ssh/id_rsa*
    ssh-keygen -f /root/.ssh/id_rsa -P '' &> /dev/null
    if [ ${OS_ID} == "CentOS" -o ${OS_ID} == "Rocky" ] &> /dev/null;then
        rpm -q sshpass &> /dev/null || { ${COLOR}"Installing the sshpass package"${END};yum -y install sshpass &> /dev/null; }
    else
        dpkg -S sshpass &> /dev/null || { ${COLOR}"Installing the sshpass package"${END};apt -y install sshpass &> /dev/null; }
    fi
    sshpass -e ssh-copy-id -o StrictHostKeyChecking=no ${IP} &> /dev/null
    [ $? -eq 0 ] && echo ${IP} is finished || echo ${IP} is false

    for i in ${HOSTS};do
        sshpass -e scp -o StrictHostKeyChecking=no -r /root/.ssh root@${i}: &> /dev/null
        [ $? -eq 0 ] && echo ${i} is finished || echo ${i} is false
    done

    for i in ${HOSTS};do
        scp /root/.ssh/known_hosts ${i}:.ssh/ &> /dev/null
        [ $? -eq 0 ] && echo ${i} is finished || echo ${i} is false
    done
}

main(){
    os
    ssh_key_push
}

main

[root@ansible-server ansible]# bash ssh_key.sh 
172.31.3.100 is finished
172.31.3.101 is finished
172.31.3.102 is finished
172.31.3.103 is finished
172.31.3.104 is finished
172.31.3.105 is finished
172.31.3.106 is finished
172.31.3.107 is finished
172.31.3.108 is finished
172.31.3.109 is finished
172.31.3.110 is finished
172.31.3.111 is finished
172.31.3.112 is finished
172.31.3.113 is finished
172.31.3.101 is finished
172.31.3.102 is finished
172.31.3.103 is finished
172.31.3.104 is finished
172.31.3.105 is finished
172.31.3.106 is finished
172.31.3.107 is finished
172.31.3.108 is finished
172.31.3.109 is finished
172.31.3.110 is finished
172.31.3.111 is finished
172.31.3.112 is finished
172.31.3.113 is finished
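
With the keys pushed to every host, a quick ad-hoc ping verifies that ansible can reach them all over key-based SSH (a simple sanity check against the inventory defined earlier):

[root@ansible-server ansible]# ansible all -m ping

Every host should answer with SUCCESS and "ping": "pong"; any host reporting UNREACHABLE needs its key or network fixed before continuing.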

5.System Initialization and Package Installation

5.1 System Initialization

[root@ansible-server ansible]# mkdir -p roles/reset/{tasks,templates,vars}

[root@ansible-server ansible]# cd roles/reset/
[root@ansible-server reset]# ls
tasks  templates  vars

[root@ansible-server reset]# vim templates/yum8.repo.j2 
[BaseOS]
name=BaseOS
{% if ansible_distribution =="Rocky" %}
baseurl=https://{{ ROCKY_URL }}/rocky/$releasever/BaseOS/$basearch/os/
{% elif ansible_distribution=="CentOS" %}
baseurl=https://{{ URL }}/centos/$releasever-stream/BaseOS/$basearch/os/
{% endif %}
gpgcheck=1
{% if ansible_distribution =="Rocky" %}
gpgkey=file:///etc/pki/rpm-gpg/RPM-GPG-KEY-rockyofficial
{% elif ansible_distribution=="CentOS" %}
gpgkey=file:///etc/pki/rpm-gpg/RPM-GPG-KEY-centosofficial
{% endif %}

[AppStream]
name=AppStream
{% if ansible_distribution =="Rocky" %}
baseurl=https://{{ ROCKY_URL }}/rocky/$releasever/AppStream/$basearch/os/
{% elif ansible_distribution=="CentOS" %}
baseurl=https://{{ URL }}/centos/$releasever-stream/AppStream/$basearch/os/
{% endif %}
gpgcheck=1
{% if ansible_distribution =="Rocky" %}
gpgkey=file:///etc/pki/rpm-gpg/RPM-GPG-KEY-rockyofficial
{% elif ansible_distribution=="CentOS" %}
gpgkey=file:///etc/pki/rpm-gpg/RPM-GPG-KEY-centosofficial
{% endif %}

[extras]
name=extras
{% if ansible_distribution =="Rocky" %}
baseurl=https://{{ ROCKY_URL }}/rocky/$releasever/extras/$basearch/os/
{% elif ansible_distribution=="CentOS" %}
baseurl=https://{{ URL }}/centos/$releasever-stream/extras/$basearch/os/
{% endif %}
gpgcheck=1
{% if ansible_distribution =="Rocky" %}
gpgkey=file:///etc/pki/rpm-gpg/RPM-GPG-KEY-rockyofficial
{% elif ansible_distribution=="CentOS" %}
gpgkey=file:///etc/pki/rpm-gpg/RPM-GPG-KEY-centosofficial
{% endif %}

{% if ansible_distribution =="Rocky" %}
[plus]
{% elif ansible_distribution=="CentOS" %}
[centosplus]
{% endif %}
{% if ansible_distribution =="Rocky" %}
name=plus
{% elif ansible_distribution=="CentOS" %}
name=centosplus
{% endif %}
{% if ansible_distribution =="Rocky" %}
baseurl=https://{{ ROCKY_URL }}/rocky/$releasever/plus/$basearch/os/
{% elif ansible_distribution=="CentOS" %}
baseurl=https://{{ URL }}/centos/$releasever-stream/centosplus/$basearch/os/
{% endif %}
gpgcheck=1
{% if ansible_distribution =="Rocky" %}
gpgkey=file:///etc/pki/rpm-gpg/RPM-GPG-KEY-rockyofficial
{% elif ansible_distribution=="CentOS" %}
gpgkey=file:///etc/pki/rpm-gpg/RPM-GPG-KEY-centosofficial
{% endif %}

[PowerTools]
name=PowerTools
{% if ansible_distribution =="Rocky" %}
baseurl=https://{{ ROCKY_URL }}/rocky/$releasever/PowerTools/$basearch/os/
{% elif ansible_distribution=="CentOS" %}
baseurl=https://{{ URL }}/centos/$releasever-stream/PowerTools/$basearch/os/
{% endif %}
gpgcheck=1
{% if ansible_distribution =="Rocky" %}
gpgkey=file:///etc/pki/rpm-gpg/RPM-GPG-KEY-rockyofficial
{% elif ansible_distribution=="CentOS" %}
gpgkey=file:///etc/pki/rpm-gpg/RPM-GPG-KEY-centosofficial
{% endif %}

[epel]
name=epel
{% if ansible_distribution =="Rocky" %}
baseurl=https://{{ ROCKY_URL }}/fedora/epel/$releasever/Everything/$basearch/
{% elif ansible_distribution=="CentOS" %}
baseurl=https://{{ URL }}/epel/$releasever/Everything/$basearch/
{% endif %}
gpgcheck=1
{% if ansible_distribution =="Rocky" %}
gpgkey=https://{{ ROCKY_URL }}/fedora/epel/RPM-GPG-KEY-EPEL-$releasever
{% elif ansible_distribution=="CentOS" %}
gpgkey=https://{{ URL }}/epel/RPM-GPG-KEY-EPEL-$releasever
{% endif %}

[root@ansible-server reset]# vim templates/yum7.repo.j2 
[base]
name=base
baseurl=https://{{ URL }}/centos/$releasever/os/$basearch/
gpgcheck=1
gpgkey=file:///etc/pki/rpm-gpg/RPM-GPG-KEY-CentOS-$releasever

[extras]
name=extras
baseurl=https://{{ URL }}/centos/$releasever/extras/$basearch/
gpgcheck=1
gpgkey=file:///etc/pki/rpm-gpg/RPM-GPG-KEY-CentOS-$releasever

[updates]
name=updates
baseurl=https://{{ URL }}/centos/$releasever/updates/$basearch/
gpgcheck=1
gpgkey=file:///etc/pki/rpm-gpg/RPM-GPG-KEY-CentOS-$releasever

[centosplus]
name=centosplus
baseurl=https://{{ URL }}/centos/$releasever/centosplus/$basearch/
gpgcheck=1
gpgkey=file:///etc/pki/rpm-gpg/RPM-GPG-KEY-CentOS-$releasever

[epel]
name=epel
baseurl=https://{{ URL }}/epel/$releasever/$basearch/
gpgcheck=1
gpgkey=https://{{ URL }}/epel/RPM-GPG-KEY-EPEL-$releasever

[root@ansible-server reset]#  vim templates/apt.list.j2 
deb http://{{ URL }}/ubuntu/ {{ ansible_distribution_release }} main restricted universe multiverse
deb-src http://{{ URL }}/ubuntu/ {{ ansible_distribution_release }} main restricted universe multiverse

deb http://{{ URL }}/ubuntu/ {{ ansible_distribution_release }}-security main restricted universe multiverse
deb-src http://{{ URL }}/ubuntu/ {{ ansible_distribution_release }}-security main restricted universe multiverse

deb http://{{ URL }}/ubuntu/ {{ ansible_distribution_release }}-updates main restricted universe multiverse
deb-src http://{{ URL }}/ubuntu/ {{ ansible_distribution_release }}-updates main restricted universe multiverse

deb http://{{ URL }}/ubuntu/ {{ ansible_distribution_release }}-proposed main restricted universe multiverse
deb-src http://{{ URL }}/ubuntu/ {{ ansible_distribution_release }}-proposed main restricted universe multiverse

deb http://{{ URL }}/ubuntu/ {{ ansible_distribution_release }}-backports main restricted universe multiverse
deb-src http://{{ URL }}/ubuntu/ {{ ansible_distribution_release }}-backports main restricted universe multiverse

#Set VIP below to the VIP (virtual IP) of your keepalived setup, and set HARBOR_DOMAIN to your own harbor domain name
[root@ansible-server reset]# vim vars/main.yml
VIP: 172.31.3.188
HARBOR_DOMAIN: harbor.raymonds.cc
ROCKY_URL: mirrors.ustc.edu.cn
URL: mirrors.cloud.tencent.com

[root@ansible-server reset]# vim tasks/set_hostname.yml
- name: set hostname
  hostname:
    name: "{{ hname }}.{{ domain }}"

[root@ansible-server reset]# vim tasks/set_hosts.yml
- name: set hosts file
  lineinfile:
    path: "/etc/hosts"
    line: "{{ item }} {{hostvars[item].ansible_hostname}}.{{ domain }} {{hostvars[item].ansible_hostname}}"
  loop:
    "{{ play_hosts }}"
- name: set hosts file2
  lineinfile:
    path: "/etc/hosts"
    line: "{{ item }}"
  loop:
    - "{{ VIP }} k8s-lb"
    - "{{ VIP }} {{ HARBOR_DOMAIN }}"

[root@ansible-server reset]# vim tasks/disable_selinux.yml
- name: disable selinux
  replace:
    path: /etc/sysconfig/selinux
    regexp: '^(SELINUX=).*'
    replace: '\1disabled'
  when:
    - (ansible_distribution=="CentOS" or ansible_distribution=="Rocky")

[root@ansible-server reset]# vim tasks/disable_firewall.yml
- name: disable firewall
  systemd:
    name: firewalld
    state: stopped
    enabled: no
  when:
    - (ansible_distribution=="CentOS" or ansible_distribution=="Rocky")
- name: disable ufw
  systemd:
    name: ufw
    state: stopped
    enabled: no
  when:
    - ansible_distribution=="Ubuntu"

[root@ansible-server reset]# vim tasks/disable_networkmanager.yml
- name: disable NetworkManager
  systemd:
    name: NetworkManager
    state: stopped
    enabled: no
  when:
    - ansible_distribution=="CentOS"
    - ansible_distribution_major_version=="7"

[root@ansible-server reset]# vim tasks/disable_swap.yml
- name: disable swap
  replace:
    path: /etc/fstab
    regexp: '^(.*swap.*)'
    replace: '#\1'
- name: get sd number
  shell:
    cmd: lsblk|awk -F"[ └─]" '/SWAP/{printf $3}'
  register: SD_NAME
  when:
    - ansible_distribution=="Ubuntu"
    - ansible_distribution_major_version=="20"
- name: disable swap for ubuntu20
  shell:
    cmd: systemctl mask dev-{{ SD_NAME.stdout}}.swap
  when:
    - ansible_distribution=="Ubuntu"
    - ansible_distribution_major_version=="20"

[root@ansible-server reset]# vim tasks/set_limits.yml
- name: set limit
  shell:
    cmd: ulimit -SHn 65535
- name: set limits.conf file
  lineinfile:
    path: "/etc/security/limits.conf"
    line: "{{ item }}"
  loop:
    - "* soft nofile 655360"
    - "* hard nofile 131072"
    - "* soft nproc 655350"
    - "* hard nproc 655350"
    - "* soft memlock unlimited"
    - "* hard memlock unlimited" 

[root@ansible-server reset]# vim tasks/optimization_sshd.yml
- name: optimization sshd disable UseDNS
  replace:
    path: /etc/ssh/sshd_config
    regexp: '^#(UseDNS).*'
    replace: '\1 no'
- name: optimization sshd disable CentOS or Rocky GSSAPIAuthentication
  replace:
    path: /etc/ssh/sshd_config
    regexp: '^(GSSAPIAuthentication).*'
    replace: '\1 no'
  when:
    - (ansible_distribution=="CentOS" or ansible_distribution=="Rocky")
- name: optimization sshd disable Ubuntu GSSAPIAuthentication
  replace:
    path: /etc/ssh/sshd_config
    regexp: '^#(GSSAPIAuthentication).*'
    replace: '\1 no'
  notify:
    - restart sshd
  when:
    - ansible_distribution=="Ubuntu"

[root@ansible-server reset]# vim tasks/set_alias.yml
- name: set CentOS or Rocky alias
  lineinfile:
    path: ~/.bashrc
    line: "{{ item }}"
  loop:
    - "alias cdnet=\"cd /etc/sysconfig/network-scripts\""
    - "alias vie0=\"vim /etc/sysconfig/network-scripts/ifcfg-eth0\""
    - "alias vie1=\"vim /etc/sysconfig/network-scripts/ifcfg-eth1\""
    - "alias scandisk=\"echo '- - -' > /sys/class/scsi_host/host0/scan;echo '- - -' > /sys/class/scsi_host/host1/scan;echo '- - -' > /sys/class/scsi_host/host2/scan\""
  when:
    - (ansible_distribution=="CentOS" or ansible_distribution=="Rocky")
- name: set Ubuntu alias
  lineinfile:
    path: ~/.bashrc
    line: "{{ item }}"
  loop:
    - "alias cdnet=\"cd /etc/netplan\""
    - "alias scandisk=\"echo '- - -' > /sys/class/scsi_host/host0/scan;echo '- - -' > /sys/class/scsi_host/host1/scan;echo '- - -' > /sys/class/scsi_host/host2/scan\""
  when:
    - ansible_distribution=="Ubuntu"

[root@ansible-server reset]# vim tasks/set_mirror.yml
- name: find CentOS or Rocky repo files
  find:
    paths: /etc/yum.repos.d/
    patterns: "*.repo"
  register: FILENAME
  when:
    - (ansible_distribution=="CentOS" or ansible_distribution=="Rocky")
- name: delete CentOS or Rocky repo files
  file:
    path: "{{ item.path }}"
    state: absent
  with_items: "{{ FILENAME.files }}"
  when:
    - (ansible_distribution=="CentOS" or ansible_distribution=="Rocky")
- name: set CentOS8 or Rocky8 Mirror warehouse
  template:
    src: yum8.repo.j2
    dest: /etc/yum.repos.d/base.repo
  when:
    - (ansible_distribution=="CentOS" or ansible_distribution=="Rocky")
    - ansible_distribution_major_version=="8"
- name: set CentOS7 Mirror warehouse
  template:
    src: yum7.repo.j2
    dest: /etc/yum.repos.d/base.repo
  when:
    - ansible_distribution=="CentOS"
    - ansible_distribution_major_version=="7"
- name: set Ubuntu Mirror warehouse
  template:
    src: apt.list.j2
    dest: /etc/apt/sources.list
  when:
    - ansible_distribution=="Ubuntu"
- name: delete lock files
  file:
    path: "{{ item }}"
    state: absent
  loop:
    - /var/lib/dpkg/lock
    - /var/lib/apt/lists/lock
    - /var/cache/apt/archives/lock
  when:
    - ansible_distribution=="Ubuntu"
- name: apt update
  apt:
    update_cache: yes 
    force: yes 
  when:
    - ansible_distribution=="Ubuntu"

[root@ansible-server reset]# vim tasks/main.yml
- include: set_hostname.yml
- include: set_hosts.yml
- include: disable_selinux.yml
- include: disable_firewall.yml
- include: disable_networkmanager.yml
- include: disable_swap.yml
- include: set_limits.yml
- include: optimization_sshd.yml
- include: set_alias.yml
- include: set_mirror.yml

[root@ansible-server reset]# cd ../../
[root@ansible-server ansible]# tree roles/reset/
roles/reset/
├── tasks
│   ├── disable_firewall.yml
│   ├── disable_networkmanager.yml
│   ├── disable_selinux.yml
│   ├── disable_swap.yml
│   ├── main.yml
│   ├── optimization_sshd.yml
│   ├── set_alias.yml
│   ├── set_hostname.yml
│   ├── set_hosts.yml
│   ├── set_limits.yml
│   └── set_mirror.yml
├── templates
│   ├── apt.list.j2
│   ├── yum7.repo.j2
│   └── yum8.repo.j2
└── vars
    └── main.yml

3 directories, 15 files

[root@ansible-server ansible]# vim reset_role.yml
---
- hosts: all

  roles:
    - role: reset

[root@ansible-server ansible]# ansible-playbook reset_role.yml 
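
A quick spot check after the play finishes confirms that the hostnames and hosts entries were applied on every node (an ad-hoc example, adjust to taste):

[root@ansible-server ansible]# ansible all -m shell -a 'hostname; tail -n2 /etc/hosts'

Each host should report its k8s-* FQDN and show the k8s-lb and harbor VIP entries added by the set_hosts tasks.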

5.2 Install Packages

[root@ansible-server ansible]# mkdir -p roles/reset-installpackage/{files,tasks}

[root@ansible-server ansible]# cd roles/reset-installpackage/
[root@ansible-server reset-installpackage]# ls
files  tasks

[root@ansible-server reset-installpackage]# wget http://193.49.22.109/elrepo/kernel/el7/x86_64/RPMS/kernel-ml-devel-4.19.12-1.el7.elrepo.x86_64.rpm -P files/

[root@ansible-server reset-installpackage]# wget http://193.49.22.109/elrepo/kernel/el7/x86_64/RPMS/kernel-ml-4.19.12-1.el7.elrepo.x86_64.rpm -P files/

[root@ansible-server reset-installpackage]# vim files/ge4.18_ipvs.conf
ip_vs
ip_vs_lc
ip_vs_wlc
ip_vs_rr
ip_vs_wrr
ip_vs_lblc
ip_vs_lblcr
ip_vs_dh
ip_vs_sh
ip_vs_fo
ip_vs_nq
ip_vs_sed
ip_vs_ftp
ip_vs_sh
nf_conntrack
ip_tables
ip_set
xt_set
ipt_set
ipt_rpfilter
ipt_REJECT
ipip

[root@ansible-server reset-installpackage]# vim files/lt4.18_ipvs.conf
ip_vs
ip_vs_lc
ip_vs_wlc
ip_vs_rr
ip_vs_wrr
ip_vs_lblc
ip_vs_lblcr
ip_vs_dh
ip_vs_sh
ip_vs_fo
ip_vs_nq
ip_vs_sed
ip_vs_ftp
ip_vs_sh
nf_conntrack_ipv4
ip_tables
ip_set
xt_set
ipt_set
ipt_rpfilter
ipt_REJECT
ipip

[root@ansible-server reset-installpackage]# vim files/k8s.conf 
net.ipv4.ip_forward = 1
net.bridge.bridge-nf-call-iptables = 1
net.bridge.bridge-nf-call-ip6tables = 1
fs.may_detach_mounts = 1
vm.overcommit_memory=1
vm.panic_on_oom=0
fs.inotify.max_user_watches=89100
fs.file-max=52706963
fs.nr_open=52706963
net.netfilter.nf_conntrack_max=2310720

net.ipv4.tcp_keepalive_time = 600
net.ipv4.tcp_keepalive_probes = 3
net.ipv4.tcp_keepalive_intvl =15
net.ipv4.tcp_max_tw_buckets = 36000
net.ipv4.tcp_tw_reuse = 1
net.ipv4.tcp_max_orphans = 327680
net.ipv4.tcp_orphan_retries = 3
net.ipv4.tcp_syncookies = 1
net.ipv4.tcp_max_syn_backlog = 16384
net.ipv4.ip_conntrack_max = 65536
net.ipv4.tcp_max_syn_backlog = 16384
net.ipv4.tcp_timestamps = 0
net.core.somaxconn = 16384


[root@ansible-server reset-installpackage]# vim tasks/install_package.yml
- name: install Centos or Rocky package
  yum:
    name: vim,tree,lrzsz,wget,jq,psmisc,net-tools,telnet,git
  when:
    - (ansible_distribution=="CentOS" or ansible_distribution=="Rocky")
- name: install Centos8 or Rocky8 package
  yum:
    name: rsync
  when:
    - (ansible_distribution=="CentOS" or ansible_distribution=="Rocky")
    - ansible_distribution_major_version=="8"
- name: install Ubuntu package
  apt:
    name: tree,lrzsz,jq
    force: yes 
  when:
    - ansible_distribution=="Ubuntu"

[root@ansible-server reset-installpackage]# vim tasks/set_centos7_kernel.yml
- name: update CentOS7
  yum:
    name: '*'
    state: latest
    exclude: kernel*
  when:
    - ansible_distribution=="CentOS"
    - ansible_distribution_major_version=="7"
- name: copy CentOS7 kernel files
  copy: 
    src: "{{ item }}"
    dest: /tmp
  loop:
    - kernel-ml-4.19.12-1.el7.elrepo.x86_64.rpm
    - kernel-ml-devel-4.19.12-1.el7.elrepo.x86_64.rpm
  when:
    - ansible_distribution=="CentOS"
    - ansible_distribution_major_version=="7"
- name: Finding RPM files 
  find: 
    paths: "/tmp" 
    patterns: "*.rpm" 
  register: RPM_RESULT
  when:
    - ansible_distribution=="CentOS"
    - ansible_distribution_major_version=="7"
- name: Install RPM 
  yum: 
    name: "{{ item.path }}" 
  with_items: "{{ RPM_RESULT.files }}" 
  when:
    - ansible_distribution=="CentOS"
    - ansible_distribution_major_version=="7"
- name: delete kernel files
  file:
    path: "{{ item.path }}"
    state: absent 
  with_items: "{{ RPM_RESULT.files }}" 
  when:
    - ansible_distribution=="CentOS"
    - ansible_distribution_major_version=="7"
- name: set grub
  shell:
    cmd: grub2-set-default 0 && grub2-mkconfig -o /etc/grub2.cfg; grubby --args="user_namespace.enable=1" --update-kernel="$(grubby --default-kernel)"
  when:
    - ansible_distribution=="CentOS"
    - ansible_distribution_major_version=="7"

[root@ansible-server reset-installpackage]# vim tasks/install_ipvsadm.yml
- name: install CentOS or Rocky ipvsadm
  yum:
    name: ipvsadm,ipset,sysstat,conntrack,libseccomp
  when:
    - (ansible_distribution=="CentOS" or ansible_distribution=="Rocky")
    - inventory_hostname in groups.k8s_cluster
- name: install Ubuntu ipvsadm
  apt:
    name: ipvsadm,ipset,sysstat,conntrack,libseccomp-dev
    force: yes 
  when:
    - ansible_distribution=="Ubuntu"
    - inventory_hostname in groups.k8s_cluster

[root@ansible-server reset-installpackage]# vim tasks/set_ipvs.yml
- name: configuration load_mod
  shell:
    cmd: |
      modprobe -- ip_vs
      modprobe -- ip_vs_rr
      modprobe -- ip_vs_wrr
      modprobe -- ip_vs_sh
  when:
    - inventory_hostname in groups.k8s_cluster
- name: configuration load_mod kernel ge4.18
  shell:
    cmd: modprobe -- nf_conntrack
  when:
    - (ansible_distribution=="CentOS" or ansible_distribution=="Rocky") or (ansible_distribution=="Ubuntu" and ansible_distribution_major_version=="20")
    - inventory_hostname in groups.k8s_cluster
- name: configuration load_mod kernel lt4.18
  shell:
    cmd: modprobe -- nf_conntrack_ipv4
  when:
    - (ansible_distribution=="Ubuntu" and ansible_distribution_major_version=="18")
    - inventory_hostname in groups.k8s_cluster
- name: Copy ge4.18_ipvs.conf file
  copy: 
    src: ge4.18_ipvs.conf
    dest: /etc/modules-load.d/ipvs.conf
  when:
    - (ansible_distribution=="CentOS" or ansible_distribution=="Rocky") or (ansible_distribution=="Ubuntu" and ansible_distribution_major_version=="20")
    - inventory_hostname in groups.k8s_cluster
- name: Copy lt4.18_ipvs.conf file
  copy: 
    src: lt4.18_ipvs.conf
    dest: /etc/modules-load.d/ipvs.conf
  when:
    - (ansible_distribution=="Ubuntu" and ansible_distribution_major_version=="18")
    - inventory_hostname in groups.k8s_cluster
- name: start systemd-modules-load service 
  systemd:
    name: systemd-modules-load
    state: started
    enabled: yes
  when:
    - inventory_hostname in groups.k8s_cluster

[root@ansible-server reset-installpackage]# vim tasks/set_k8s_kernel.yml
- name: copy k8s.conf file
  copy: 
    src: k8s.conf
    dest: /etc/sysctl.d/
- name: Load kernel config
  shell:
    cmd: "sysctl --system"

[root@ansible-server reset-installpackage]# vim tasks/reboot_system.yml
- name: reboot system
  reboot:

[root@ansible-server reset-installpackage]# vim tasks/main.yml
- include: install_package.yml
- include: set_centos7_kernel.yml
- include: install_ipvsadm.yml
- include: set_ipvs.yml
- include: set_k8s_kernel.yml
- include: reboot_system.yml

[root@ansible-server reset-installpackage]# cd ../../
[root@ansible-server ansible]# tree roles/reset-installpackage/
roles/reset-installpackage/
├── files
│   ├── ge4.18_ipvs.conf
│   ├── k8s.conf
│   ├── kernel-ml-4.19.12-1.el7.elrepo.x86_64.rpm
│   ├── kernel-ml-devel-4.19.12-1.el7.elrepo.x86_64.rpm
│   └── lt4.18_ipvs.conf
└── tasks
    ├── install_ipvsadm.yml
    ├── install_package.yml
    ├── main.yml
    ├── reboot_system.yml
    ├── set_centos7_kernel.yml
    ├── set_ipvs.yml
    └── set_k8s_kernel.yml

2 directories, 12 files

[root@ansible-server ansible]# vim reset_installpackage_role.yml 
---
- hosts: all
  serial: 3

  roles:
    - role: reset-installpackage

[root@ansible-server ansible]# ansible-playbook reset_installpackage_role.yml 
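
Because the play ends with a reboot, it is worth verifying afterwards that the new kernel (4.19 on CentOS 7), the ipvs modules and the sysctl settings survived the restart; for example:

[root@ansible-server ansible]# ansible k8s_cluster -m shell -a 'uname -r; lsmod | grep ip_vs | head -n3; sysctl net.ipv4.ip_forward'

ip_vs, ip_vs_rr and friends should be listed, and net.ipv4.ip_forward should report 1 on all k8s_cluster hosts.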

6.chrony

6.1 chrony-server

[root@ansible-server ansible]# mkdir -p roles/chrony-server/{tasks,handlers}

[root@ansible-server ansible]# cd roles/chrony-server/
[root@ansible-server chrony-server]# ls
handlers  tasks

[root@ansible-server chrony-server]# vim tasks/install_chrony_yum.yml
- name: install CentOS or Rocky chrony
  yum:
    name: chrony
  when:
    - (ansible_distribution=="CentOS" or ansible_distribution=="Rocky")
- name: delete CentOS or Rocky /etc/chrony.conf file contains '^pool.*' string line
  lineinfile:
    path: /etc/chrony.conf
    regexp: '^pool.*'
    state: absent
  when:
    - (ansible_distribution=="CentOS" or ansible_distribution=="Rocky")
  notify:
    - restart chronyd
- name: delete CentOS or Rocky /etc/chrony.conf file contains '^server.*' string line
  lineinfile:
    path: /etc/chrony.conf
    regexp: '^server.*'
    state: absent
  when:
    - (ansible_distribution=="CentOS" or ansible_distribution=="Rocky")
  notify:
    - restart chronyd
- name: add Time server for CentOS or Rocky /etc/chrony.conf file
  lineinfile:
    path: /etc/chrony.conf
    insertafter: '^# Please consider .*'
    line: "server ntp.aliyun.com iburst\nserver time1.cloud.tencent.com iburst\nserver ntp.tuna.tsinghua.edu.cn iburst"
  when:
    - (ansible_distribution=="CentOS" or ansible_distribution=="Rocky")
  notify:
    - restart chronyd
- name: Substitution '^#(allow).*' string for CentOS or Rocky /etc/chrony.conf file
  replace:
    path: /etc/chrony.conf
    regexp: '^#(allow).*'
    replace: '\1 0.0.0.0/0'
  when:
    - (ansible_distribution=="CentOS" or ansible_distribution=="Rocky")
  notify:
    - restart chronyd
- name: Substitution '^#(local).*' string for CentOS or Rocky /etc/chrony.conf file
  replace:
    path: /etc/chrony.conf
    regexp: '^#(local).*'
    replace: '\1 stratum 10'
  when:
    - (ansible_distribution=="CentOS" or ansible_distribution=="Rocky")
  notify:
    - restart chronyd

[root@ansible-server chrony-server]# vim tasks/install_chrony_apt.yml
- name: delete lock files
  file:
    path: "{{ item }}"
    state: absent
  loop:
    - /var/lib/dpkg/lock
    - /var/lib/apt/lists/lock
    - /var/cache/apt/archives/lock
  when:
    - ansible_distribution=="Ubuntu"
- name: apt update
  apt:
    update_cache: yes
    force: yes 
  when:
    - ansible_distribution=="Ubuntu"
- name: install Ubuntu chrony
  apt:
    name: chrony
    force: yes
  when:
    - ansible_distribution=="Ubuntu"
- name: delete Ubuntu /etc/chrony/chrony.conf file contains '^pool.*' string line
  lineinfile:
    path: /etc/chrony/chrony.conf
    regexp: '^pool.*'
    state: absent
  when:
    - ansible_distribution=="Ubuntu"
  notify:
    - restart chronyd
- name: add Time server for Ubuntu /etc/chrony/chrony.conf file
  lineinfile:
    path: /etc/chrony/chrony.conf
    insertafter: '^# See http:.*'
    line: "server ntp.aliyun.com iburst\nserver time1.cloud.tencent.com iburst\nserver ntp.tuna.tsinghua.edu.cn iburst"
  when:
    - ansible_distribution=="Ubuntu"
- name: add 'allow 0.0.0.0/0' string and 'local stratum 10' string for Ubuntu /etc/chrony/chrony.conf file
  lineinfile:
    path: /etc/chrony/chrony.conf
    line: "{{ item }}"
  loop:
    - "allow 0.0.0.0/0"
    - "local stratum 10"
  when:
    - ansible_distribution=="Ubuntu"
  notify:
    - restart chronyd

[root@ansible-server chrony-server]# vim tasks/service.yml
- name: start chronyd
  systemd:
    name: chronyd
    state: started
    enabled: yes

[root@ansible-server chrony-server]# vim tasks/main.yml
- include: install_chrony_yum.yml
- include: install_chrony_apt.yml
- include: service.yml

[root@ansible-server chrony-server]# vim handlers/main.yml
- name: restart chronyd
  systemd:
    name: chronyd
    state: restarted

[root@ansible-server chrony-server]# cd ../../
[root@ansible-server ansible]# tree roles/chrony-server/
roles/chrony-server/
├── handlers
│   └── main.yml
└── tasks
    ├── install_chrony_apt.yml
    ├── install_chrony_yum.yml
    ├── main.yml
    └── service.yml

2 directories, 5 files

[root@ansible-server ansible]# vim chrony_server_role.yml 
---
- hosts: chrony_server

  roles:
    - role: chrony-server

[root@ansible-server ansible]# ansible-playbook chrony_server_role.yml

[root@k8s-ha01 ~]# chronyc sources -nv
210 Number of sources = 3
MS Name/IP address         Stratum Poll Reach LastRx Last sample               
===============================================================================
^- 203.107.6.88                  2   6    37    62    -15ms[  -15ms] +/-   35ms
^* 139.199.215.251               2   6    37    62    -10us[+1488us] +/-   37ms
^? 101.6.6.172                   0   7     0     -     +0ns[   +0ns] +/-    0ns

[root@k8s-ha02 ~]# chronyc sources -nv
210 Number of sources = 3
MS Name/IP address         Stratum Poll Reach LastRx Last sample               
===============================================================================
^* 203.107.6.88                  2   6    77     3  -4058us[+2582us] +/-   31ms
^+ 139.199.215.251               2   6    77     2  +6881us[+6881us] +/-   33ms
^? 101.6.6.172                   0   7     0     -     +0ns[   +0ns] +/-    0ns

6.2 chrony-client

[root@ansible-server ansible]# mkdir -p roles/chrony-client/{tasks,handlers,vars}
[root@ansible-server ansible]# cd roles/chrony-client/
[root@ansible-server chrony-client]# ls
handlers  tasks  vars

#Set the IPs below to the chrony-server addresses: SERVER1 is ha1's IP and SERVER2 is ha2's IP
[root@ansible-server chrony-client]# vim vars/main.yml
SERVER1: 172.31.3.104
SERVER2: 172.31.3.105

[root@ansible-server chrony-client]# vim tasks/install_chrony_yum.yml
- name: install CentOS or Rocky chrony
  yum:
    name: chrony
  when:
    - (ansible_distribution=="CentOS" or ansible_distribution=="Rocky")
- name: delete CentOS or Rocky /etc/chrony.conf file contains '^pool.*' string line
  lineinfile:
    path: /etc/chrony.conf
    regexp: '^pool.*'
    state: absent
  when:
    - (ansible_distribution=="CentOS" or ansible_distribution=="Rocky")
  notify:
    - restart chronyd
- name: delete CentOS or Rocky /etc/chrony.conf file contains '^server.*' string line
  lineinfile:
    path: /etc/chrony.conf
    regexp: '^server.*'
    state: absent
  when:
    - (ansible_distribution=="CentOS" or ansible_distribution=="Rocky")
  notify:
    - restart chronyd
- name: add Time server for CentOS or Rocky /etc/chrony.conf file
  lineinfile:
    path: /etc/chrony.conf
    insertafter: '^# Please consider .*'
    line: "server {{ SERVER1 }} iburst\nserver {{ SERVER2 }} iburst"
  when:
    - (ansible_distribution=="CentOS" or ansible_distribution=="Rocky")
  notify:
    - restart chronyd

[root@ansible-server chrony-client]# vim tasks/install_chrony_apt.yml
- name: delete lock files
  file:
    path: "{{ item }}"
    state: absent
  loop:
    - /var/lib/dpkg/lock
    - /var/lib/apt/lists/lock
    - /var/cache/apt/archives/lock
  when:
    - ansible_distribution=="Ubuntu"
- name: apt update
  apt:
    update_cache: yes
    force: yes 
  when:
    - ansible_distribution=="Ubuntu"
- name: install Ubuntu chrony
  apt:
    name: chrony
    force: yes
  when:
    - ansible_distribution=="Ubuntu"
- name: delete Ubuntu /etc/chrony/chrony.conf file contains '^pool.*' string line
  lineinfile:
    path: /etc/chrony/chrony.conf
    regexp: '^pool.*'
    state: absent
  when:
    - ansible_distribution=="Ubuntu"
  notify:
    - restart chronyd
- name: add Time server for Ubuntu /etc/chrony/chrony.conf file
  lineinfile:
    path: /etc/chrony/chrony.conf
    insertafter: '^# See http:.*'
    line: "server {{ SERVER1 }} iburst\nserver {{ SERVER2 }} iburst"
  when:
    - ansible_distribution=="Ubuntu"
  notify:
    - restart chronyd

[root@ansible-server chrony-client]# vim tasks/service.yml
- name: start chronyd
  systemd:
    name: chronyd
    state: started
    enabled: yes

[root@ansible-server chrony-client]# vim tasks/main.yml
- include: install_chrony_yum.yml
- include: install_chrony_apt.yml
- include: service.yml

[root@ansible-server chrony-client]# vim handlers/main.yml
- name: restart chronyd
  systemd:
    name: chronyd
    state: restarted

[root@ansible-server chrony-client]# cd ../../
[root@ansible-server ansible]# tree roles/chrony-client/
roles/chrony-client/
├── handlers
│   └── main.yml
├── tasks
│   ├── install_chrony_apt.yml
│   ├── install_chrony_yum.yml
│   ├── main.yml
│   └── service.yml
└── vars
    └── main.yml

3 directories, 6 files

[root@ansible-server ansible]# vim chrony_client_role.yml
---
- hosts: chrony_client

  roles:
    - role: chrony-client

[root@ansible-server ansible]# ansible-playbook chrony_client_role.yml

[root@k8s-master01 ~]# chronyc sources -nv
210 Number of sources = 2
MS Name/IP address         Stratum Poll Reach LastRx Last sample               
===============================================================================
^* k8s-ha01                      3   6    17    28    -57us[  -29us] +/-   31ms
^+ k8s-ha02                      3   6    17    29   +204us[ +231us] +/-   34ms

7.haproxy

[root@ansible-server ansible]# mkdir -p roles/haproxy/{tasks,vars,files,templates}
[root@ansible-server ansible]# cd roles/haproxy/
[root@ansible-server haproxy]# ls
files  tasks  templates  vars

[root@ansible-server haproxy]# wget http://www.lua.org/ftp/lua-5.4.3.tar.gz -P files/
[root@ansible-server haproxy]# wget https://www.haproxy.org/download/2.4/src/haproxy-2.4.10.tar.gz -P files/

[root@ansible-server haproxy]# vim files/haproxy.service
[Unit]
Description=HAProxy Load Balancer
After=syslog.target network.target

[Service]
ExecStartPre=/usr/sbin/haproxy -f /etc/haproxy/haproxy.cfg -c -q
ExecStart=/usr/sbin/haproxy -Ws -f /etc/haproxy/haproxy.cfg -p /var/lib/haproxy/haproxy.pid
ExecReload=/bin/kill -USR2 $MAINPID

[Install]
WantedBy=multi-user.target

#Set VIP below to the VIP (virtual IP) of your keepalived setup
[root@ansible-server haproxy]# vim vars/main.yml
SRC_DIR: /usr/local/src
LUA_FILE: lua-5.4.3.tar.gz
HAPROXY_FILE: haproxy-2.4.10.tar.gz
HAPROXY_INSTALL_DIR: /apps/haproxy
STATS_AUTH_USER: admin
STATS_AUTH_PASSWORD: 123456
VIP: 172.31.3.188

[root@ansible-server haproxy]# vim templates/haproxy.cfg.j2
global
maxconn 100000
chroot {{ HAPROXY_INSTALL_DIR }}
stats socket /var/lib/haproxy/haproxy.sock mode 600 level admin
uid 99
gid 99
daemon
pidfile /var/lib/haproxy/haproxy.pid
log 127.0.0.1 local3 info

defaults
option http-keep-alive
option forwardfor
maxconn 100000
mode http
timeout connect 300000ms
timeout client 300000ms
timeout server 300000ms

listen stats
    mode http
    bind 0.0.0.0:9999
    stats enable
    log global
    stats uri /haproxy-status
    stats auth {{ STATS_AUTH_USER }}:{{ STATS_AUTH_PASSWORD }}

listen kubernetes-6443
    bind {{ VIP }}:6443
    mode tcp
    log global
    {% for i in groups.master %}
    server {{ i }} {{ i }}:6443 check inter 3s fall 2 rise 5
    {% endfor %}

listen harbor-80
    bind {{ VIP }}:80
    mode http
    log global
    balance source
    {% for i in groups.harbor %}
    server {{ i }} {{ i }}:80 check inter 3s fall 2 rise 5
    {% endfor %}

[root@ansible-server haproxy]# vim tasks/install_package.yml
- name: install CentOS or Rocky depend on the package
  yum:
    name: gcc,make,gcc-c++,glibc,glibc-devel,pcre,pcre-devel,openssl,openssl-devel,systemd-devel,libtermcap-devel,ncurses-devel,libevent-devel,readline-devel
  when:
    - (ansible_distribution=="CentOS" or ansible_distribution=="Rocky")
    - inventory_hostname in groups.haproxy
- name: delete lock files
  file:
    path: "{{ item }}"
    state: absent
  loop:
    - /var/lib/dpkg/lock
    - /var/lib/apt/lists/lock
    - /var/cache/apt/archives/lock
  when:
    - ansible_distribution=="Ubuntu"
    - inventory_hostname in groups.haproxy
- name: apt update
  apt:
    update_cache: yes 
    force: yes 
  when:
    - ansible_distribution=="Ubuntu"
    - inventory_hostname in groups.haproxy
- name: install Ubuntu depend on the package
  apt:
    name: gcc,make,openssl,libssl-dev,libpcre3,libpcre3-dev,zlib1g-dev,libreadline-dev,libsystemd-dev
    force: yes 
  when:
    - ansible_distribution=="Ubuntu"
    - inventory_hostname in groups.haproxy

[root@ansible-server haproxy]# vim tasks/build_lua.yml
- name: unarchive lua package
  unarchive:
    src: "{{ LUA_FILE }}"
    dest: "{{ SRC_DIR }}"
  when:
    - inventory_hostname in groups.haproxy
- name: get LUA_DIR directory
  shell:
    cmd: echo {{ LUA_FILE }} | sed -nr 's/^(.*[0-9]).([[:lower:]]).*/\1/p'
  register: LUA_DIR
  when:
    - inventory_hostname in groups.haproxy
- name: Build and install lua
  shell: 
    chdir: "{{ SRC_DIR }}/{{ LUA_DIR.stdout }}"
    cmd: make all test
  when:
    - inventory_hostname in groups.haproxy

[root@ansible-server haproxy]# vim tasks/build_haproxy.yml
- name: unarchive haproxy package
  unarchive:
    src: "{{ HAPROXY_FILE }}"
    dest: "{{ SRC_DIR }}"
  when:
    - inventory_hostname in groups.haproxy
- name: get HAPROXY_DIR directory
  shell:
    cmd: echo {{ HAPROXY_FILE }} | sed -nr 's/^(.*[0-9]).([[:lower:]]).*/\1/p'
  register: HAPROXY_DIR
  when:
    - inventory_hostname in groups.haproxy
- name: make Haproxy
  shell: 
    chdir: "{{ SRC_DIR }}/{{ HAPROXY_DIR.stdout }}"
    cmd: make -j {{ ansible_processor_vcpus }} ARCH=x86_64 TARGET=linux-glibc USE_PCRE=1 USE_OPENSSL=1 USE_ZLIB=1 USE_SYSTEMD=1 USE_CPU_AFFINITY=1 USE_LUA=1 LUA_INC={{ SRC_DIR }}/{{ LUA_DIR.stdout }}/src/ LUA_LIB={{ SRC_DIR }}/{{ LUA_DIR.stdout }}/src/ PREFIX={{ HAPROXY_INSTALL_DIR }}
  when:
    - inventory_hostname in groups.haproxy
- name: make install Haproxy
  shell: 
    chdir: "{{ SRC_DIR }}/{{ HAPROXY_DIR.stdout }}"
    cmd: make install PREFIX={{ HAPROXY_INSTALL_DIR }}
  when:
    - inventory_hostname in groups.haproxy

[root@ansible-server haproxy]# vim tasks/config.yml
- name: copy haproxy.service file
  copy:
    src: haproxy.service
    dest: /lib/systemd/system
  when:
    - inventory_hostname in groups.haproxy
- name: create haproxy link
  file:
    src: "../..{{ HAPROXY_INSTALL_DIR }}/sbin/{{ item.src }}"
    dest: "/usr/sbin/{{ item.src }}"
    state: link
    owner: root
    group: root
    mode: 755
    force: yes   
  with_items:
    - src: haproxy
  when:
    - inventory_hostname in groups.haproxy
- name: create /etc/haproxy directory
  file:
    path: /etc/haproxy
    state: directory
  when:
    - inventory_hostname in groups.haproxy
- name: create /var/lib/haproxy/ directory
  file:
    path: /var/lib/haproxy/
    state: directory
  when:
    - inventory_hostname in groups.haproxy
- name: copy haproxy.cfg file
  template:
    src: haproxy.cfg.j2
    dest: /etc/haproxy/haproxy.cfg
  when:
    - inventory_hostname in groups.haproxy
- name: Add the kernel
  sysctl:
    name: net.ipv4.ip_nonlocal_bind
    value: "1"
  when:
    - inventory_hostname in groups.haproxy
- name: PATH variable
  copy:
    content: 'PATH={{ HAPROXY_INSTALL_DIR }}/sbin:$PATH'
    dest: /etc/profile.d/haproxy.sh
  when:
    - inventory_hostname in groups.haproxy
- name: PATH variable entry
  shell:
    cmd: . /etc/profile.d/haproxy.sh
  when:
    - inventory_hostname in groups.haproxy

[root@ansible-server haproxy]# vim tasks/service.yml
- name: start haproxy
  systemd:
    name: haproxy
    state: started
    enabled: yes
    daemon_reload: yes
  when:
    - inventory_hostname in groups.haproxy

[root@ansible-server haproxy]# vim tasks/main.yml
- include: install_package.yml
- include: build_lua.yml
- include: build_haproxy.yml
- include: config.yml
- include: service.yml

[root@ansible-server haproxy]# cd ../../
[root@ansible-server ansible]# tree roles/haproxy/
roles/haproxy/
├── files
│   ├── haproxy-2.4.10.tar.gz
│   ├── haproxy.service
│   └── lua-5.4.3.tar.gz
├── tasks
│   ├── build_haproxy.yml
│   ├── build_lua.yml
│   ├── config.yml
│   ├── install_package.yml
│   ├── main.yml
│   └── service.yml
├── templates
│   └── haproxy.cfg.j2
└── vars
    └── main.yml

4 directories, 11 files

[root@ansible-server ansible]# vim haproxy_role.yml
---
- hosts: haproxy:master:harbor

  roles:
    - role: haproxy

[root@ansible-server ansible]# ansible-playbook haproxy_role.yml
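
haproxy binds the VIP before keepalived has brought it up, which is why the role sets net.ipv4.ip_nonlocal_bind=1; the listeners can still be checked right away, for example:

[root@ansible-server ansible]# ansible ha -m shell -a 'ss -ntl'

Ports 9999 (stats), 6443 (kube-apiserver) and 80 (harbor) on the VIP should all be in LISTEN state on both ha1 and ha2.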

8.keepalived

8.1 keepalived-master

[root@ansible-server ansible]# mkdir -p roles/keepalived-master/{tasks,files,vars,templates}
[root@ansible-server ansible]# cd roles/keepalived-master/
[root@ansible-server keepalived-master]# ls
files  tasks  templates  vars

[root@ansible-server keepalived-master]#  wget https://keepalived.org/software/keepalived-2.2.4.tar.gz -P files/

[root@ansible-server keepalived-master]# vim files/check_haproxy.sh 
#!/bin/bash
#
#**********************************************************************************************
#Author:        Raymond
#QQ:            88563128
#Date:          2022-01-09
#FileName:      check_haproxy.sh
#URL:           raymond.blog.csdn.net
#Description:   The test script
#Copyright (C): 2022 All rights reserved
#*********************************************************************************************
err=0
for k in $(seq 1 3);do
    check_code=$(pgrep haproxy)
    if [[ $check_code == "" ]]; then
        err=$(expr $err + 1)
        sleep 1
        continue
    else
        err=0
        break
    fi
done

if [[ $err != "0" ]]; then
    echo "systemctl stop keepalived"
    /usr/bin/systemctl stop keepalived
    exit 1
else
    exit 0
fi
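
The check script counts three failed pgrep attempts before stopping keepalived, so a node whose haproxy dies gives up the VIP within a few seconds. Once this role has been applied, it can be exercised by hand on ha1 (exit status 0 means haproxy is alive):

[root@k8s-ha01 ~]# bash /etc/keepalived/check_haproxy.sh; echo $?
0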

#Set VIP below to the VIP (virtual IP) of your keepalived setup
[root@ansible-server keepalived-master]# vim vars/main.yml
URL: mirrors.cloud.tencent.com
ROCKY_URL: mirrors.sjtug.sjtu.edu.cn
KEEPALIVED_FILE: keepalived-2.2.4.tar.gz
SRC_DIR: /usr/local/src
KEEPALIVED_INSTALL_DIR: /apps/keepalived
STATE: MASTER
PRIORITY: 100
VIP: 172.31.3.188

[root@ansible-server keepalived-master]# vim templates/PowerTools.repo.j2 
[PowerTools]
name=PowerTools
{% if ansible_distribution =="Rocky" %}
baseurl=https://{{ ROCKY_URL }}/rocky/$releasever/PowerTools/$basearch/os/
{% elif ansible_distribution=="CentOS" %}
baseurl=https://{{ URL }}/centos/$releasever/PowerTools/$basearch/os/
{% endif %}
gpgcheck=1
{% if ansible_distribution =="Rocky" %}
gpgkey=file:///etc/pki/rpm-gpg/RPM-GPG-KEY-rockyofficial
{% elif ansible_distribution=="CentOS" %}
gpgkey=file:///etc/pki/rpm-gpg/RPM-GPG-KEY-centosofficial
{% endif %}

[root@ansible-server keepalived-master]# vim templates/keepalived.conf.j2
! Configuration File for keepalived

global_defs {
    router_id LVS_DEVEL
    script_user root
    enable_script_security
}

vrrp_script check_haproxy {
    script "/etc/keepalived/check_haproxy.sh"
    interval 5
    weight -5
    fall 2  
    rise 1
}

vrrp_instance VI_1 {
    state {{ STATE }}
    interface {{ ansible_default_ipv4.interface }}
    virtual_router_id 51
    priority {{ PRIORITY }}
    advert_int 1
    authentication {
        auth_type PASS
        auth_pass 1111
    }
    virtual_ipaddress {
        {{ VIP }} dev {{ ansible_default_ipv4.interface }} label {{ ansible_default_ipv4.interface }}:1
    }
    track_script {
       check_haproxy
    }
}

[root@ansible-server keepalived-master]# vim tasks/install_package.yml
- name: find "[PowerTools]" mirror warehouse
  find:
    path: /etc/yum.repos.d/
    contains: '\[PowerTools\]'
  register: RETURN
  when:
    - (ansible_distribution=="CentOS" or ansible_distribution=="Rocky")
    - ansible_distribution_major_version=="8"
- name: copy repo file
  template:
    src: PowerTools.repo.j2
    dest: /etc/yum.repos.d/PowerTools.repo
  when: 
    - (ansible_distribution=="CentOS" or ansible_distribution=="Rocky") and (ansible_distribution_major_version=="8") 
    - RETURN.matched == 0
- name: install CentOS8 or Rocky8 depend on the package
  yum:
    name: make,gcc,ipvsadm,autoconf,automake,openssl-devel,libnl3-devel,iptables-devel,ipset-devel,file-devel,net-snmp-devel,glib2-devel,pcre2-devel,libnftnl-devel,libmnl-devel,systemd-devel
  when:
    - (ansible_distribution=="CentOS" or ansible_distribution=="Rocky")
    - ansible_distribution_major_version=="8"
- name: install CentOS7 depend on the package
  yum:
    name: make,gcc,libnfnetlink-devel,libnfnetlink,ipvsadm,libnl,libnl-devel,libnl3,libnl3-devel,lm_sensors-libs,net-snmp-agent-libs,net-snmp-libs,openssh-server,openssh-clients,openssl,openssl-devel,automake,iproute
  when:
    - ansible_distribution=="CentOS"
    - ansible_distribution_major_version=="7"
- name: delete lock files
  file:
    path: "{{ item }}"
    state: absent
  loop:
    - /var/lib/dpkg/lock
    - /var/lib/apt/lists/lock
    - /var/cache/apt/archives/lock
  when:
    - ansible_distribution=="Ubuntu"
- name: apt update
  apt:
    update_cache: yes 
    force: yes 
  when:
    - ansible_distribution=="Ubuntu"
- name: install Ubuntu 20.04 depend on the package
  apt:
    name: make,gcc,ipvsadm,build-essential,pkg-config,automake,autoconf,libipset-dev,libnl-3-dev,libnl-genl-3-dev,libssl-dev,libxtables-dev,libip4tc-dev,libip6tc-dev,libipset-dev,libmagic-dev,libsnmp-dev,libglib2.0-dev,libpcre2-dev,libnftnl-dev,libmnl-dev,libsystemd-dev
    force: yes 
  when:
    - ansible_distribution=="Ubuntu"
    - ansible_distribution_major_version=="20"
- name: install Ubuntu 18.04 depend on the package
  apt:
    name: make,gcc,ipvsadm,build-essential,pkg-config,automake,autoconf,iptables-dev,libipset-dev,libnl-3-dev,libnl-genl-3-dev,libssl-dev,libxtables-dev,libip4tc-dev,libip6tc-dev,libipset-dev,libmagic-dev,libsnmp-dev,libglib2.0-dev,libpcre2-dev,libnftnl-dev,libmnl-dev,libsystemd-dev
    force: yes 
  when:
    - ansible_distribution=="Ubuntu"
    - ansible_distribution_major_version=="18"

[root@ansible-server keepalived-master]# vim tasks/keepalived_file.yml
- name: unarchive  keepalived package
  unarchive:
    src: "{{ KEEPALIVED_FILE }}"
    dest: "{{ SRC_DIR }}"

[root@ansible-server keepalived-master]# vim tasks/build.yml
- name: get KEEPALIVED_DIR directory
  shell:
    cmd: echo {{ KEEPALIVED_FILE }} | sed -nr 's/^(.*[0-9]).([[:lower:]]).*/\1/p'
  register: KEEPALIVED_DIR
- name: Build and install Keepalived
  shell: 
    chdir: "{{ SRC_DIR }}/{{ KEEPALIVED_DIR.stdout }}"
    cmd: ./configure --prefix={{ KEEPALIVED_INSTALL_DIR }} --disable-fwmark
- name: make && make install
  shell:
    chdir: "{{ SRC_DIR }}/{{ KEEPALIVED_DIR.stdout }}"
    cmd: make -j {{ ansible_processor_vcpus }} && make install
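
The sed expression in the first task above simply strips the ".tar.gz" suffix from KEEPALIVED_FILE so the later tasks can chdir into the extracted source directory. A quick way to see what it yields (purely illustrative, can be run anywhere):

[root@ansible-server keepalived-master]# echo keepalived-2.2.4.tar.gz | sed -nr 's/^(.*[0-9]).([[:lower:]]).*/\1/p'
keepalived-2.2.4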

[root@ansible-server keepalived-master]# vim tasks/config.yml
- name: create /etc/keepalived directory
  file:
    path: /etc/keepalived
    state: directory
- name: copy keepalived.conf file
  template:
    src: keepalived.conf.j2
    dest: /etc/keepalived/keepalived.conf
- name: copy check_haproxy.sh file
  copy:
    src: check_haproxy.sh
    dest: /etc/keepalived/
    mode: 0755
- name: copy keepalived.service file
  copy:
    remote_src: True
    src: "{{ SRC_DIR }}/{{ KEEPALIVED_DIR.stdout }}/keepalived/keepalived.service"
    dest: /lib/systemd/system/
- name: PATH variable
  copy:
    content: 'PATH={{ KEEPALIVED_INSTALL_DIR }}/sbin:$PATH'
    dest: /etc/profile.d/keepalived.sh
- name: PATH variable entry
  shell:
    cmd: . /etc/profile.d/keepalived.sh

[root@ansible-server keepalived-master]# vim tasks/service.yml
- name: start keepalived
  systemd:
    name: keepalived
    state: started
    enabled: yes
    daemon_reload: yes

[root@ansible-server keepalived-master]# vim tasks/main.yml
- include: install_package.yml
- include: keepalived_file.yml
- include: build.yml
- include: config.yml
- include: service.yml

[root@ansible-server keepalived-master]# cd ../../
[root@ansible-server ansible]# tree roles/keepalived-master/
roles/keepalived-master/
├── files
│   ├── check_haproxy.sh
│   └── keepalived-2.2.4.tar.gz
├── tasks
│   ├── build.yml
│   ├── config.yml
│   ├── install_package.yml
│   ├── keepalived_file.yml
│   ├── main.yml
│   └── service.yml
├── templates
│   ├── keepalived.conf.j2
│   └── PowerTools.repo.j2
└── vars
    └── main.yml

4 directories, 11 files

[root@ansible-server ansible]# vim keepalived_master_role.yml 
---
- hosts: keepalives_master

  roles:
    - role: keepalived-master

[root@ansible-server ansible]# ansible-playbook keepalived_master_role.yml 
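
After the play finishes, the VIP should be bound to a labelled sub-interface on the MASTER node. A quick ad-hoc check from the ansible server (optional, not part of the playbook; 172.31.3.104 and 172.31.3.188 are the ha1 address and VIP used in this example):

[root@ansible-server ansible]# ansible 172.31.3.104 -m shell -a 'ip address show | grep 172.31.3.188'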

8.2 keepalived-backup

[root@ansible-server ansible]# mkdir -p roles/keepalived-backup/{tasks,files,vars,templates}
[root@ansible-server ansible]# cd roles/keepalived-backup/
[root@ansible-server keepalived-backup]# ls
files  tasks  templates  vars

[root@ansible-server keepalived-backup]#  wget https://keepalived.org/software/keepalived-2.2.4.tar.gz -P files/

[root@ansible-server keepalived-backup]# vim files/check_haproxy.sh 
#!/bin/bash
#
#**********************************************************************************************
#Author:        Raymond
#QQ:            88563128
#Date:          2022-01-09
#FileName:      check_haproxy.sh
#URL:           raymond.blog.csdn.net
#Description:   The test script
#Copyright (C): 2022 All rights reserved
#*********************************************************************************************
err=0
for k in $(seq 1 3);do
    check_code=$(pgrep haproxy)
    if [[ $check_code == "" ]]; then
        err=$(expr $err + 1)
        sleep 1
        continue
    else
        err=0
        break
    fi
done

if [[ $err != "0" ]]; then
    echo "systemctl stop keepalived"
    /usr/bin/systemctl stop keepalived
    exit 1
else
    exit 0
fi

#Set VIP below to your own keepalived VIP (virtual IP) address
[root@ansible-server keepalived-backup]# vim vars/main.yml
URL: mirrors.cloud.tencent.com
ROCKY_URL: mirrors.sjtug.sjtu.edu.cn
KEEPALIVED_FILE: keepalived-2.2.4.tar.gz
SRC_DIR: /usr/local/src
KEEPALIVED_INSTALL_DIR: /apps/keepalived
STATE: BACKUP
PRIORITY: 90
VIP: 172.31.3.188

[root@ansible-server keepalived-backup]# vim templates/PowerTools.repo.j2 
[PowerTools]
name=PowerTools
{% if ansible_distribution =="Rocky" %}
baseurl=https://{{ ROCKY_URL }}/rocky/$releasever/PowerTools/$basearch/os/
{% elif ansible_distribution=="CentOS" %}
baseurl=https://{{ URL }}/centos/$releasever/PowerTools/$basearch/os/
{% endif %}
gpgcheck=1
{% if ansible_distribution =="Rocky" %}
gpgkey=file:///etc/pki/rpm-gpg/RPM-GPG-KEY-rockyofficial
{% elif ansible_distribution=="CentOS" %}
gpgkey=file:///etc/pki/rpm-gpg/RPM-GPG-KEY-centosofficial
{% endif %}

[root@ansible-server keepalived-backup]# vim templates/keepalived.conf.j2
! Configuration File for keepalived

global_defs {
    router_id LVS_DEVEL
    script_user root
    enable_script_security
}

vrrp_script check_haproxy {
    script "/etc/keepalived/check_haproxy.sh"
    interval 5
    weight -5
    fall 2  
    rise 1
}

vrrp_instance VI_1 {
    state {{ STATE }}
    interface {{ ansible_default_ipv4.interface }}
    virtual_router_id 51
    priority {{ PRIORITY }}
    advert_int 1
    authentication {
        auth_type PASS
        auth_pass 1111
    }
    virtual_ipaddress {
        {{ VIP }} dev {{ ansible_default_ipv4.interface }} label {{ ansible_default_ipv4.interface }}:1
    }
    track_script {
       check_haproxy
    }
}

[root@ansible-server keepalived-backup]# vim tasks/install_package.yml
- name: find "[PowerTools]" mirror warehouse
  find:
    path: /etc/yum.repos.d/
    contains: '\[PowerTools\]'
  register: RETURN
  when:
    - (ansible_distribution=="CentOS" or ansible_distribution=="Rocky")
    - ansible_distribution_major_version=="8"
- name: copy repo file
  template:
    src: PowerTools.repo.j2
    dest: /etc/yum.repos.d/PowerTools.repo
  when: 
    - (ansible_distribution=="CentOS" or ansible_distribution=="Rocky") and (ansible_distribution_major_version=="8") 
    - RETURN.matched == 0
- name: install CentOS 8 or Rocky 8 dependency packages
  yum:
    name: make,gcc,ipvsadm,autoconf,automake,openssl-devel,libnl3-devel,iptables-devel,ipset-devel,file-devel,net-snmp-devel,glib2-devel,pcre2-devel,libnftnl-devel,libmnl-devel,systemd-devel
  when:
    - (ansible_distribution=="CentOS" or ansible_distribution=="Rocky")
    - ansible_distribution_major_version=="8"
- name: install CentOS 7 dependency packages
  yum:
    name: make,gcc,libnfnetlink-devel,libnfnetlink,ipvsadm,libnl,libnl-devel,libnl3,libnl3-devel,lm_sensors-libs,net-snmp-agent-libs,net-snmp-libs,openssh-server,openssh-clients,openssl,openssl-devel,automake,iproute
  when:
    - ansible_distribution=="CentOS"
    - ansible_distribution_major_version=="7"
- name: delete lock files
  file:
    path: "{{ item }}"
    state: absent
  loop:
    - /var/lib/dpkg/lock
    - /var/lib/apt/lists/lock
    - /var/cache/apt/archives/lock
  when:
    - ansible_distribution=="Ubuntu"
- name: apt update
  apt:
    update_cache: yes 
    force: yes 
  when:
    - ansible_distribution=="Ubuntu"
- name: install Ubuntu 20.04 dependency packages
  apt:
    name: make,gcc,ipvsadm,build-essential,pkg-config,automake,autoconf,libipset-dev,libnl-3-dev,libnl-genl-3-dev,libssl-dev,libxtables-dev,libip4tc-dev,libip6tc-dev,libipset-dev,libmagic-dev,libsnmp-dev,libglib2.0-dev,libpcre2-dev,libnftnl-dev,libmnl-dev,libsystemd-dev
    force: yes 
  when:
    - ansible_distribution=="Ubuntu"
    - ansible_distribution_major_version=="20"
- name: install Ubuntu 18.04 dependency packages
  apt:
    name: make,gcc,ipvsadm,build-essential,pkg-config,automake,autoconf,iptables-dev,libipset-dev,libnl-3-dev,libnl-genl-3-dev,libssl-dev,libxtables-dev,libip4tc-dev,libip6tc-dev,libipset-dev,libmagic-dev,libsnmp-dev,libglib2.0-dev,libpcre2-dev,libnftnl-dev,libmnl-dev,libsystemd-dev
    force: yes 
  when:
    - ansible_distribution=="Ubuntu"
    - ansible_distribution_major_version=="18"

[root@ansible-server keepalived-backup]# vim tasks/keepalived_file.yml
- name: unarchive  keepalived package
  unarchive:
    src: "{{ KEEPALIVED_FILE }}"
    dest: "{{ SRC_DIR }}"

[root@ansible-server keepalived-backup]# vim tasks/build.yml
- name: get KEEPALIVED_DIR directory
  shell:
    cmd: echo {{ KEEPALIVED_FILE }} | sed -nr 's/^(.*[0-9]).([[:lower:]]).*/\1/p'
  register: KEEPALIVED_DIR
- name: Build and install Keepalived
  shell: 
    chdir: "{{ SRC_DIR }}/{{ KEEPALIVED_DIR.stdout }}"
    cmd: ./configure --prefix={{ KEEPALIVED_INSTALL_DIR }} --disable-fwmark
- name: make && make install
  shell:
    chdir: "{{ SRC_DIR }}/{{ KEEPALIVED_DIR.stdout }}"
    cmd: make -j {{ ansible_processor_vcpus }} && make install

[root@ansible-server keepalived-backup]# vim tasks/config.yml
- name: create /etc/keepalived directory
  file:
    path: /etc/keepalived
    state: directory
- name: copy keepalived.conf file
  template:
    src: keepalived.conf.j2
    dest: /etc/keepalived/keepalived.conf
- name: copy check_haproxy.sh file
  copy:
    src: check_haproxy.sh
    dest: /etc/keepalived/
    mode: 0755
- name: copy keepalived.service file
  copy:
    remote_src: True
    src: "{{ SRC_DIR }}/{{ KEEPALIVED_DIR.stdout }}/keepalived/keepalived.service"
    dest: /lib/systemd/system/
- name: PATH variable
  copy:
    content: 'PATH={{ KEEPALIVED_INSTALL_DIR }}/sbin:$PATH'
    dest: /etc/profile.d/keepalived.sh
- name: PATH variable entry
  shell:
    cmd: . /etc/profile.d/keepalived.sh

[root@ansible-server keepalived-backup]# vim tasks/service.yml
- name: start keepalived
  systemd:
    name: keepalived
    state: started
    enabled: yes
    daemon_reload: yes

[root@ansible-server keepalived-backup]# vim tasks/main.yml
- include: install_package.yml
- include: keepalived_file.yml
- include: build.yml
- include: config.yml
- include: service.yml

[root@ansible-server keepalived-backup]# cd ../../
[root@ansible-server ansible]# tree roles/keepalived-backup/
roles/keepalived-backup/
├── files
│   ├── check_haproxy.sh
│   └── keepalived-2.2.4.tar.gz
├── tasks
│   ├── build.yml
│   ├── config.yml
│   ├── install_package.yml
│   ├── keepalived_file.yml
│   ├── main.yml
│   └── service.yml
├── templates
│   ├── keepalived.conf.j2
│   └── PowerTools.repo.j2
└── vars
    └── main.yml

4 directories, 11 files

[root@ansible-server ansible]# vim keepalived_backup_role.yml 
---
- hosts: keepalives_backup

  roles:
    - role: keepalived-backup

[root@ansible-server ansible]# ansible-playbook keepalived_backup_role.yml 
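
With both instances up, a simple failover test is to stop haproxy on the MASTER: check_haproxy.sh will stop keepalived and the VIP should move to the BACKUP within a few seconds, then fail back once the MASTER recovers. The sketch below assumes haproxy was installed with a systemd unit named haproxy in the earlier haproxy role:

[root@k8s-ha01 ~]# systemctl stop haproxy                   #simulate a failure on the MASTER
[root@k8s-ha02 ~]# ip address show | grep 172.31.3.188      #the VIP should now be on ha2
[root@k8s-ha01 ~]# systemctl start haproxy keepalived       #recover; the higher priority lets ha1 take the VIP back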

9.harbor

9.1 docker (binary installation)

[root@ansible-server ansible]# mkdir -p roles/docker-binary/{tasks,files,vars,templates}
[root@ansible-server ansible]# cd roles/docker-binary/
[root@ansible-server docker-binary]# ls
files  tasks  templates  vars

[root@ansible-server docker-binary]# wget https://download.docker.com/linux/static/stable/x86_64/docker-20.10.14.tgz -P files/

#Set HARBOR_DOMAIN below to your own harbor domain name
[root@ansible-server docker-binary]# vim vars/main.yml
DOCKER_VERSION: 20.10.14
HARBOR_DOMAIN: harbor.raymonds.cc

[root@ansible-server docker-binary]# vim files/docker.service
[Unit]
Description=Docker Application Container Engine
Documentation=https://docs.docker.com
After=network-online.target firewalld.service
Wants=network-online.target

[Service]
Type=notify
# the default is not to use systemd for cgroups because the delegate issues still
# exists and systemd currently does not support the cgroup feature set required
# for containers run by docker
ExecStart=/usr/bin/dockerd -H unix:///var/run/docker.sock
ExecReload=/bin/kill -s HUP $MAINPID
# Having non-zero Limit*s causes performance problems due to accounting overhead
# in the kernel. We recommend using cgroups to do container-local accounting.
LimitNOFILE=infinity
LimitNPROC=infinity
LimitCORE=infinity
# Uncomment TasksMax if your systemd version supports it.
# Only systemd 226 and above support this option.
#TasksMax=infinity
TimeoutStartSec=0
# set delegate yes so that systemd does not reset the cgroups of docker containers
Delegate=yes
# kill only the docker process, not all processes in the cgroup
KillMode=process
# restart the docker process if it exits prematurely
Restart=on-failure
StartLimitBurst=3
StartLimitInterval=60s

[Install]
WantedBy=multi-user.target

[root@ansible-server docker-binary]# vim templates/daemon.json.j2
{
    "registry-mirrors": [
        "https://registry.docker-cn.com",
        "http://hub-mirror.c.163.com",
        "https://docker.mirrors.ustc.edu.cn"
    ],
    "insecure-registries": ["{{ HARBOR_DOMAIN }}"],
    "exec-opts": ["native.cgroupdriver=systemd"],
    "max-concurrent-downloads": 10,
    "max-concurrent-uploads": 5,
    "log-opts": {
        "max-size": "300m",
        "max-file": "2"
    },
    "live-restore": true
}
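
Once the role has run on a host, the mirror and cgroup-driver settings can be confirmed with docker info (an optional check, not part of the role):

[root@k8s-harbor01 ~]# docker info | grep -A3 -E 'Cgroup Driver|Registry Mirrors|Insecure Registries'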

[root@ansible-server docker-binary]# vim tasks/docker_files.yml
- name: unarchive  docker package
  unarchive:
    src: "docker-{{ DOCKER_VERSION }}.tgz"
    dest: /usr/local/src
- name: move docker files
  shell:
    cmd: mv /usr/local/src/docker/* /usr/bin/

[root@ansible-server docker-binary]# vim tasks/service_file.yml
- name: copy docker.service file
  copy:
    src: docker.service
    dest: /lib/systemd/system/docker.service

[root@ansible-server docker-binary]# vim tasks/set_mirror_accelerator.yml
- name: mkdir /etc/docker
  file:
    path: /etc/docker
    state: directory
- name: set mirror_accelerator
  template:
    src: daemon.json.j2
    dest: /etc/docker/daemon.json

[root@ansible-server docker-binary]# vim tasks/set_alias.yml
- name: set docker alias
  lineinfile:
    path: ~/.bashrc
    line: "{{ item }}"
  loop:
    - "alias rmi=\"docker images -qa|xargs docker rmi -f\""
    - "alias rmc=\"docker ps -qa|xargs docker rm -f\""

[root@ansible-server docker-binary]# vim tasks/service.yml
- name: start docker
  systemd:
    name: docker
    state: started
    enabled: yes
    daemon_reload: yes

[root@ansible-server docker-binary]# vim tasks/set_swap.yml
- name: set WARNING No swap limit support
  replace:
    path: /etc/default/grub
    regexp: '^(GRUB_CMDLINE_LINUX=.*)\"$'
    replace: '\1 swapaccount=1"'
  when:
    - ansible_distribution=="Ubuntu"
- name: update-grub
  shell:
    cmd: update-grub
  when:
    - ansible_distribution=="Ubuntu"
- name: reboot Ubuntu system
  reboot:
  when:
    - ansible_distribution=="Ubuntu"

[root@ansible-server docker-binary]# vim tasks/main.yml
- include: docker_files.yml
- include: service_file.yml
- include: set_mirror_accelerator.yml
- include: set_alias.yml
- include: service.yml
- include: set_swap.yml

[root@ansible-server docker-binary]# cd ../../
[root@ansible-server ansible]# tree roles/docker-binary/
roles/docker-binary/
├── files
│   ├── docker-20.10.14.tgz
│   └── docker.service
├── tasks
│   ├── docker_files.yml
│   ├── main.yml
│   ├── service_file.yml
│   ├── service.yml
│   ├── set_alias.yml
│   ├── set_mirror_accelerator.yml
│   └── set_swap.yml
├── templates
│   └── daemon.json.j2
└── vars
    └── main.yml

4 directories, 11 files

9.2 docker-compose

[root@ansible-server ansible]# mkdir -p roles/docker-compose/{tasks,files}
[root@ansible-server ansible]# cd roles/docker-compose/
[root@ansible-server docker-compose]# ls
files  tasks

[root@ansible-server docker-compose]# wget https://github.com/docker/compose/releases/download/1.29.2/docker-compose-Linux-x86_64 -O files/docker-compose-linux-x86_64

[root@ansible-server docker-compose]# vim tasks/install_docker_compose.yml
- name: copy docker compose file
  copy:
    src: docker-compose-linux-x86_64
    dest: /usr/bin/docker-compose
mode: 0755

[root@ansible-server docker-compose]# vim tasks/main.yml
- include: install_docker_compose.yml

[root@ansible-server ansible]# tree roles/docker-compose/
roles/docker-compose/
├── files
│   └── docker-compose-linux-x86_64
└── tasks
    ├── install_docker_compose.yml
    └── main.yml

2 directories, 3 files
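
This role is only pulled in later as a meta dependency of the harbor role; once that playbook has run, the binary can be checked on a harbor host (optional):

[root@k8s-harbor01 ~]# docker-compose --version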

9.3 harbor

[root@ansible-server ansible]# mkdir -p roles/harbor/{tasks,files,templates,vars,meta}

[root@ansible-server ansible]# cd roles/harbor/
[root@ansible-server harbor]# ls
files  meta  tasks  templates  vars

[root@ansible-server harbor]# wget https://github.com/goharbor/harbor/releases/download/v2.4.1/harbor-offline-installer-v2.4.1.tgz -P files/

[root@ansible-server harbor]# vim templates/harbor.service.j2
[Unit]
Description=Harbor
After=docker.service systemd-networkd.service systemd-resolved.service
Requires=docker.service
Documentation=http://github.com/vmware/harbor

[Service]
Type=simple
Restart=on-failure
RestartSec=5
ExecStart=/usr/bin/docker-compose -f {{ HARBOR_INSTALL_DIR }}/harbor/docker-compose.yml up
ExecStop=/usr/bin/docker-compose -f {{ HARBOR_INSTALL_DIR }}/harbor/docker-compose.yml down

[Install]
WantedBy=multi-user.target

[root@ansible-server harbor]# vim vars/main.yml
HARBOR_INSTALL_DIR: /apps
HARBOR_VERSION: 2.4.1
HARBOR_ADMIN_PASSWORD: 123456

[root@ansible-server harbor]# vim tasks/harbor_files.yml
- name: create HARBOR_INSTALL_DIR directory
  file:
    path: "{{ HARBOR_INSTALL_DIR }}"
    state: directory
- name: unarchive  harbor package
  unarchive:
    src: "harbor-offline-installer-v{{ HARBOR_VERSION }}.tgz"
    dest: "{{ HARBOR_INSTALL_DIR }}/"
    creates: "{{ HARBOR_INSTALL_DIR }}/harbor"

[root@ansible-server harbor]# vim tasks/config.yml
- name: mv harbor.yml
  shell: 
    cmd: mv {{ HARBOR_INSTALL_DIR }}/harbor/harbor.yml.tmpl {{ HARBOR_INSTALL_DIR }}/harbor/harbor.yml
    creates: "{{ HARBOR_INSTALL_DIR }}/harbor/harbor.yml"
- name: set harbor.yml file 'hostname' string line
  replace: 
    path: "{{ HARBOR_INSTALL_DIR }}/harbor/harbor.yml"
    regexp: '^(hostname:) .*'
    replace: '\1 {{ ansible_default_ipv4.address }}'
- name: set harbor.yml file 'harbor_admin_password' string line
  replace: 
    path: "{{ HARBOR_INSTALL_DIR }}/harbor/harbor.yml"
    regexp: '^(harbor_admin_password:) .*'
    replace: '\1 {{ HARBOR_ADMIN_PASSWORD }}'
- name: set harbor.yml file 'https' string line
  replace:
    path: "{{ HARBOR_INSTALL_DIR }}/harbor/harbor.yml"
    regexp: '^(https:)'
    replace: '#\1'
- name: set harbor.yml file 'port' string line
  replace: 
    path: "{{ HARBOR_INSTALL_DIR }}/harbor/harbor.yml"
    regexp: '  (port: 443)'
    replace: '#  \1'
- name: set harbor.yml file 'certificate' string line
  replace: 
    path: "{{ HARBOR_INSTALL_DIR }}/harbor/harbor.yml"
    regexp: '  (certificate: .*)'
    replace: '#  \1'
- name: set harbor.yml file 'private_key' string line
  replace: 
    path: "{{ HARBOR_INSTALL_DIR }}/harbor/harbor.yml"
    regexp: '  (private_key: .*)'
    replace: '#  \1'
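
After config.yml has been applied, harbor.yml should carry the host IP and admin password and have the https block commented out. A quick way to confirm on a harbor host (paths follow HARBOR_INSTALL_DIR=/apps; optional):

[root@k8s-harbor01 ~]# grep -En '^hostname:|^harbor_admin_password:|^#https:|^#  port: 443' /apps/harbor/harbor.yml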

[root@ansible-server harbor]# vim tasks/install_python.yml
- name: install CentOS or Rocky python
  yum:
    name: python3
  when:
    - (ansible_distribution=="CentOS" or ansible_distribution=="Rocky")
- name: delete lock files
  file:
    path: "{{ item }}"
    state: absent
  loop:
    - /var/lib/dpkg/lock
    - /var/lib/apt/lists/lock
    - /var/cache/apt/archives/lock
  when:
    - ansible_distribution=="Ubuntu"
- name: apt update
  apt:
    update_cache: yes 
    force: yes 
  when:
    - ansible_distribution=="Ubuntu"
- name: install Ubuntu python
  apt:
    name: python3
  when:
    - ansible_distribution=="Ubuntu"

[root@ansible-server harbor]# vim tasks/install_harbor.yml
- name: install harbor
  shell:
    cmd: "{{ HARBOR_INSTALL_DIR }}/harbor/install.sh"

[root@ansible-server harbor]# vim tasks/service_file.yml
- name: copy harbor.service
  template:
    src: harbor.service.j2
    dest: /lib/systemd/system/harbor.service

[root@ansible-server harbor]# vim tasks/service.yml
- name: service enable
  systemd:
    name: harbor
    state: started
    enabled: yes
    daemon_reload: yes

[root@ansible-server harbor]# vim tasks/main.yml
- include: harbor_files.yml
- include: config.yml
- include: install_python.yml
- include: install_harbor.yml
- include: service_file.yml
- include: service.yml

#These are the roles harbor depends on; docker-binary is the binary-based docker installation. Adjust to your environment if needed.
[root@ansible-server harbor]# vim meta/main.yml
dependencies:
  - role: docker-binary
  - role: docker-compose

[root@ansible-server harbor]# cd ../../
[root@ansible-server ansible]# tree roles/harbor/
roles/harbor/
├── files
│   └── harbor-offline-installer-v2.4.1.tgz
├── meta
│   └── main.yml
├── tasks
│   ├── config.yml
│   ├── harbor_files.yml
│   ├── install_harbor.yml
│   ├── install_python.yml
│   ├── main.yml
│   ├── service_file.yml
│   └── service.yml
├── templates
│   └── harbor.service.j2
└── vars
    └── main.yml

5 directories, 11 files

[root@ansible-server ansible]# vim harbor_role.yml
---
- hosts: harbor

  roles:
    - role: harbor

[root@ansible-server ansible]# ansible-playbook harbor_role.yml
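
A couple of optional checks once the playbook completes (172.31.3.106 is harbor01 in this example environment):

[root@ansible-server ansible]# curl -s -o /dev/null -w '%{http_code}\n' http://172.31.3.106      #expect 200 from the harbor portal
[root@k8s-harbor01 ~]# docker-compose -f /apps/harbor/docker-compose.yml ps                      #all harbor containers should be Up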

9.4 Create harbor projects and replication

This step must not be skipped; otherwise the images downloaded later cannot be pushed to harbor and the ansible plays will fail. (The Harbor web UI screenshots for these steps are omitted here.)
Create the project google_containers on harbor01.
Create the project google_containers on harbor02.
On harbor02, create a replication target.
On harbor02, create a replication rule.
On harbor01, create a replication target.
On harbor01, create a replication rule.
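
To confirm the replication works, an optional test is to push a throwaway image into the google_containers project on harbor01 and check that it shows up on harbor02 shortly afterwards. The sketch below assumes harbor.raymonds.cc resolves to harbor01 (for example via an /etc/hosts entry) and that docker on the host trusts it as an insecure registry, as configured in daemon.json above; the admin password is the HARBOR_ADMIN_PASSWORD from vars/main.yml:

[root@k8s-harbor01 ~]# docker login harbor.raymonds.cc -u admin -p 123456
[root@k8s-harbor01 ~]# docker pull busybox
[root@k8s-harbor01 ~]# docker tag busybox harbor.raymonds.cc/google_containers/busybox:repl-test
[root@k8s-harbor01 ~]# docker push harbor.raymonds.cc/google_containers/busybox:repl-test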

10.Deploy etcd

10.1 Install etcd

[root@ansible-server ansible]# mkdir -p roles/etcd/{tasks,files,vars,templates}
[root@ansible-server ansible]# cd roles/etcd/
[root@ansible-server etcd]# ls
files  tasks  templates  vars

[root@ansible-server etcd]# wget https://github.com/etcd-io/etcd/releases/download/v3.5.0/etcd-v3.5.0-linux-amd64.tar.gz
[root@ansible-server etcd]# mkdir files/etcd
[root@ansible-server etcd]# tar -xf etcd-v3.5.0-linux-amd64.tar.gz --strip-components=1 -C files/etcd/ etcd-v3.5.0-linux-amd64/etcd{,ctl}
[root@ansible-server etcd]# ls files/etcd/
etcd  etcdctl
[root@ansible-server etcd]# rm -f etcd-v3.5.0-linux-amd64.tar.gz

[root@ansible-server etcd]# vim tasks/copy_etcd_file.yml
- name: copy etcd files to etcd
  copy:
    src: "etcd/{{ item }}"
    dest: /usr/local/bin/
    mode: 0755
  loop:
    - etcd
    - etcdctl
  when:
    - inventory_hostname in groups.etcd
- name: create /opt/cni/bin directory
  file:
    path: /opt/cni/bin
    state: directory
  when:
    - inventory_hostname in groups.etcd

[root@ansible-server etcd]# wget "https://pkg.cfssl.org/R1.2/cfssl_linux-amd64" -O files/cfssl
[root@ansible-server etcd]# wget "https://pkg.cfssl.org/R1.2/cfssljson_linux-amd64" -O files/cfssljson

#Change the ETCD02 and ETCD03 IP addresses below to match your own environment
[root@ansible-server etcd]# vim vars/main.yml
ETCD_CLUSTER: etcd
K8S_CLUSTER: kubernetes
ETCD_CERT:
  - etcd-ca-key.pem
  - etcd-ca.pem
  - etcd-key.pem
  - etcd.pem

ETCD02: 172.31.3.109
ETCD03: 172.31.3.110

[root@ansible-server etcd]# mkdir templates/pki
[root@ansible-server etcd]# vim templates/pki/etcd-ca-csr.json.j2
{
  "CN": "{{ ETCD_CLUSTER }}",
  "key": {
    "algo": "rsa",
    "size": 2048
  },
  "names": [
    {
      "C": "CN",
      "ST": "Beijing",
      "L": "Beijing",
      "O": "etcd",
      "OU": "Etcd Security"
    }
  ],
  "ca": {
    "expiry": "876000h"
  }
}

[root@ansible-server etcd]# vim templates/pki/ca-config.json.j2
{
  "signing": {
    "default": {
      "expiry": "876000h"
    },
    "profiles": {
      "kubernetes": {
        "usages": [
            "signing",
            "key encipherment",
            "server auth",
            "client auth"
        ],
        "expiry": "876000h"
      }
    }
  }
}

[root@ansible-server etcd]# vim templates/pki/etcd-csr.json.j2 
{
  "CN": "{{ ETCD_CLUSTER }}",
  "key": {
    "algo": "rsa",
    "size": 2048
  },
  "names": [
    {
      "C": "CN",
      "ST": "Beijing",
      "L": "Beijing",
      "O": "etcd",
      "OU": "Etcd Security"
    }
  ]
}

[root@ansible-server etcd]# vim tasks/create_etcd_cert.yml
- name: copy cfssl and cfssljson tools
  copy: 
    src: "{{ item }}" 
    dest: /usr/local/bin
    mode: 0755
  loop: 
    - cfssl
    - cfssljson
  when:
    - ansible_hostname=="k8s-etcd01"
- name: create /etc/etcd/ssl directory
  file:
    path: /etc/etcd/ssl
    state: directory
  when:
    - inventory_hostname in groups.etcd
- name: create pki directory
  file:
    path: /root/pki
    state: directory
  when:
    - ansible_hostname=="k8s-etcd01"
- name: copy pki files
  template: 
    src: "pki/{{ item }}.j2" 
    dest: "/root/pki/{{ item }}"
  loop: 
    - etcd-ca-csr.json
    - ca-config.json
    - etcd-csr.json
  when:
    - ansible_hostname=="k8s-etcd01"
- name: create etcd-ca cert
  shell:
    chdir: /root/pki
    cmd: cfssl gencert -initca etcd-ca-csr.json | cfssljson -bare /etc/etcd/ssl/etcd-ca
    creates: /etc/etcd/ssl/etcd-ca.pem
  when:
    - ansible_hostname=="k8s-etcd01"
- name: create etcd cert
  shell:
    chdir: /root/pki
    cmd: "cfssl gencert -ca=/etc/etcd/ssl/etcd-ca.pem -ca-key=/etc/etcd/ssl/etcd-ca-key.pem -config=ca-config.json -hostname=127.0.0.1,{% for i in groups.etcd %}{{ hostvars[i].ansible_hostname}},{% endfor %}{% for i in groups.etcd %}{{ hostvars[i].ansible_default_ipv4.address }}{% if not loop.last %},{% endif %}{% endfor %} -profile={{ K8S_CLUSTER }} etcd-csr.json | cfssljson -bare /etc/etcd/ssl/etcd"
    creates: /etc/etcd/ssl/etcd-key.pem
  when:
    - ansible_hostname=="k8s-etcd01"
- name: transfer etcd certificate files from etcd01 to etcd02
  synchronize:
    src: "/etc/etcd/ssl/{{ item }}"
    dest: /etc/etcd/ssl/
    mode: pull
  loop:
    "{{ ETCD_CERT }}"
  delegate_to: "{{ ETCD02 }}"
  when:
    - ansible_hostname=="k8s-etcd01"
- name: transfer etcd certificate files from etcd01 to etcd03
  synchronize:
    src: "/etc/etcd/ssl/{{ item }}"
    dest: /etc/etcd/ssl/
    mode: pull
  loop:
    "{{ ETCD_CERT }}"
  delegate_to: "{{ ETCD03 }}"
  when:
    - ansible_hostname=="k8s-etcd01"

[root@ansible-server etcd]# mkdir templates/config
[root@ansible-server etcd]# vim templates/config/etcd.config.yml.j2
name: '{{ inventory_hostname }}'
data-dir: /var/lib/etcd
wal-dir: /var/lib/etcd/wal
snapshot-count: 5000
heartbeat-interval: 100
election-timeout: 1000
quota-backend-bytes: 0
listen-peer-urls: 'https://{{ ansible_default_ipv4.address }}:2380'
listen-client-urls: 'https://{{ ansible_default_ipv4.address }}:2379,http://127.0.0.1:2379'
max-snapshots: 3
max-wals: 5
cors:
initial-advertise-peer-urls: 'https://{{ ansible_default_ipv4.address }}:2380'
advertise-client-urls: 'https://{{ ansible_default_ipv4.address }}:2379'
discovery:
discovery-fallback: 'proxy'
discovery-proxy:
discovery-srv:
initial-cluster: '{% for i in groups.etcd %}{{ hostvars[i].inventory_hostname }}=https://{{ hostvars[i].ansible_default_ipv4.address }}:2380{% if not loop.last %},{% endif %}{% endfor %}'
initial-cluster-token: 'etcd-k8s-cluster'
initial-cluster-state: 'new'
strict-reconfig-check: false
enable-v2: true
enable-pprof: true
proxy: 'off'
proxy-failure-wait: 5000
proxy-refresh-interval: 30000
proxy-dial-timeout: 1000
proxy-write-timeout: 5000
proxy-read-timeout: 0
client-transport-security:
  cert-file: '/etc/kubernetes/pki/etcd/etcd.pem'
  key-file: '/etc/kubernetes/pki/etcd/etcd-key.pem'
  client-cert-auth: true
  trusted-ca-file: '/etc/kubernetes/pki/etcd/etcd-ca.pem'
  auto-tls: true
peer-transport-security:
  cert-file: '/etc/kubernetes/pki/etcd/etcd.pem'
  key-file: '/etc/kubernetes/pki/etcd/etcd-key.pem'
  peer-client-cert-auth: true
  trusted-ca-file: '/etc/kubernetes/pki/etcd/etcd-ca.pem'
  auto-tls: true
debug: false
log-package-levels:
log-outputs: [default]
force-new-cluster: false

[root@ansible-server etcd]# mkdir files/service
[root@ansible-server etcd]# vim files/service/etcd.service
[Unit]
Description=Etcd Service
Documentation=https://coreos.com/etcd/docs/latest/
After=network.target

[Service]
Type=notify
ExecStart=/usr/local/bin/etcd --config-file=/etc/etcd/etcd.config.yml
Restart=on-failure
RestartSec=10
LimitNOFILE=65536

[Install]
WantedBy=multi-user.target
Alias=etcd3.service

[root@ansible-server etcd]# vim tasks/etcd_config.yml
- name: copy etcd_config file
  template: 
    src: config/etcd.config.yml.j2
    dest: /etc/etcd/etcd.config.yml
  when:
    - inventory_hostname in groups.etcd
- name: copy etcd.service file
  copy: 
    src: service/etcd.service
    dest: /lib/systemd/system/etcd.service
  when:
    - inventory_hostname in groups.etcd
- name: create /etc/kubernetes/pki/etcd directory
  file:
    path: /etc/kubernetes/pki/etcd
    state: directory
  when:
    - inventory_hostname in groups.etcd
- name: link etcd_ssl to kubernetes pki
  file: 
    src: "/etc/etcd/ssl/{{ item }}"
    dest: "/etc/kubernetes/pki/etcd/{{ item }}"
    state: link
  loop:
    "{{ ETCD_CERT }}"
  when:
    - inventory_hostname in groups.etcd
- name: start etcd
  systemd:
    name: etcd
    state: started
    enabled: yes
    daemon_reload: yes
  when:
    - inventory_hostname in groups.etcd

[root@ansible-server etcd]# vim tasks/main.yml
- include: copy_etcd_file.yml
- include: create_etcd_cert.yml
- include: etcd_config.yml

[root@ansible-server etcd]# cd ../../
[root@ansible-server ansible]# tree roles/etcd/
roles/etcd/
├── files
│   ├── cfssl
│   ├── cfssljson
│   ├── etcd
│   │   ├── etcd
│   │   └── etcdctl
│   └── service
│       └── etcd.service
├── tasks
│   ├── copy_etcd_file.yml
│   ├── create_etcd_cert.yml
│   ├── etcd_config.yml
│   └── main.yml
├── templates
│   ├── config
│   │   └── etcd.config.yml.j2
│   └── pki
│       ├── ca-config.json.j2
│       ├── etcd-ca-csr.json.j2
│       └── etcd-csr.json.j2
└── vars
    └── main.yml

8 directories, 14 files

[root@ansible-server ansible]# vim etcd_role.yml
---
- hosts: etcd

  roles:
    - role: etcd

[root@ansible-server ansible]# ansible-playbook etcd_role.yml

10.2 Verify etcd

[root@k8s-etcd01 ~]# export ETCDCTL_API=3

[root@k8s-etcd01 ~]# etcdctl --endpoints="172.31.3.108:2379,172.31.3.109:2379,172.31.3.110:2379" --cacert=/etc/kubernetes/pki/etcd/etcd-ca.pem --cert=/etc/kubernetes/pki/etcd/etcd.pem --key=/etc/kubernetes/pki/etcd/etcd-key.pem  endpoint status --write-out=table
+-------------------+------------------+---------+---------+-----------+------------+-----------+------------+--------------------+--------+
|     ENDPOINT      |        ID        | VERSION | DB SIZE | IS LEADER | IS LEARNER | RAFT TERM | RAFT INDEX | RAFT APPLIED INDEX | ERRORS |
+-------------------+------------------+---------+---------+-----------+------------+-----------+------------+--------------------+--------+
| 172.31.3.108:2379 | a9fef56ff96ed75c |   3.5.0 |   20 kB |     false |      false |         2 |          8 |                  8 |        |
| 172.31.3.109:2379 | 8319ef09e8b3d277 |   3.5.0 |   20 kB |      true |      false |         2 |          8 |                  8 |        |
| 172.31.3.110:2379 | 209a1f57c506dba2 |   3.5.0 |   20 kB |     false |      false |         2 |          8 |                  8 |        |
+-------------------+------------------+---------+---------+-----------+------------+-----------+------------+--------------------+--------+
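
A health check against the same endpoints gives a second confirmation (same TLS flags as above):

[root@k8s-etcd01 ~]# etcdctl --endpoints="172.31.3.108:2379,172.31.3.109:2379,172.31.3.110:2379" --cacert=/etc/kubernetes/pki/etcd/etcd-ca.pem --cert=/etc/kubernetes/pki/etcd/etcd.pem --key=/etc/kubernetes/pki/etcd/etcd-key.pem endpoint health --write-out=table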

11.Deploy docker

#Only this playbook file needs to be created; the docker-binary role already exists
[root@ansible-server ansible]# vim docker_binary_role.yml
---
- hosts: k8s_cluster

  roles:
    - role: docker-binary

[root@ansible-server ansible]# ansible-playbook docker_binary_role.yml
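
An optional ad-hoc check from the ansible server confirms the docker binaries and service landed on every cluster node:

[root@ansible-server ansible]# ansible k8s_cluster -m shell -a 'docker --version && systemctl is-active docker'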