Kubernetes 1.10.1 Deployment Notes

Initial environment preparation:
Host addresses:
    172.16.169.91	master
    172.16.169.92	node01
    172.16.169.93	node02

OS version: CentOS 7.2, minimal install
#Disable SELinux permanently	#edits the config file; a reboot is required for it to take effect
sed -i 's/SELINUX=enforcing/SELINUX=disabled/g' /etc/sysconfig/selinux

#Disable SELinux for the current session (temporary)
setenforce 0

#Check SELinux status
getenforce


#Disable the firewall
systemctl disable firewalld
systemctl stop firewalld

#Disable NetworkManager
systemctl disable NetworkManager
systemctl stop NetworkManager

#Set the hostname on each host (run the matching command on its host)
hostnamectl set-hostname temp-test-01
hostnamectl set-hostname temp-test-02
hostnamectl set-hostname temp-test-03

#Add host records to /etc/hosts (on all three hosts)
vim /etc/hosts
172.16.169.91 temp-test-01
172.16.169.92 temp-test-02
172.16.169.93 temp-test-03

#Install commonly used utilities
yum install vim net-tools wget tree lrzsz lsof tcpdump nc mtr nmap zip unzip -y
Related packages, Baidu Netdisk link: https://pan.baidu.com/s/1QCyhY-bb0c4WEomr4oXLYQ

########## Install docker-ce ##########

##########master-01
[root@temp-test-01 ~]# cd /etc/yum.repos.d/
[root@temp-test-01 yum.repos.d]# wget https://mirrors.aliyun.com/docker-ce/linux/centos/docker-ce.repo	#use the Aliyun mirror repo
[root@temp-test-01 yum.repos.d]# yum install -y docker-ce	#install Docker
[root@temp-test-01 ~]# systemctl start docker		#start Docker and enable it at boot
[root@temp-test-01 ~]# systemctl enable docker

##########node-01
[root@temp-test-02 ~]# cd /etc/yum.repos.d/
[root@temp-test-02 yum.repos.d]# wget https://mirrors.aliyun.com/docker-ce/linux/centos/docker-ce.repo
[root@temp-test-02 yum.repos.d]# yum install -y docker-ce
[root@temp-test-02 ~]# systemctl start docker
[root@temp-test-02 ~]# systemctl enable docker

##########node-02
[root@temp-test-03 ~]# cd /etc/yum.repos.d/
[root@temp-test-03 yum.repos.d]# wget https://mirrors.aliyun.com/docker-ce/linux/centos/docker-ce.repo
[root@temp-test-03 yum.repos.d]# yum install -y docker-ce
[root@temp-test-03 ~]# systemctl start docker
[root@temp-test-03 ~]# systemctl enable docker
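
(Optional) A quick sanity check, not in the original notes, to confirm Docker is installed and running on each node before moving on; these are standard Docker/systemd commands:
[root@temp-test-01 ~]# docker version
[root@temp-test-01 ~]# systemctl is-active docker
active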

########## Directory and package preparation on all three nodes ##########

##########master-01##########
[root@temp-test-01 ~]# mkdir -p /opt/kubernetes/{cfg,bin,ssl,log}	#create the required directories; do this on all three nodes

[root@temp-test-01 ~]# sed -i s#'PATH=$PATH:$HOME/bin'#'PATH=$PATH:$HOME/bin:/opt/kubernetes/bin'#g .bash_profile		#add /opt/kubernetes/bin to PATH; do this on all three nodes
[root@temp-test-01 ~]# source .bash_profile

[root@temp-test-01 ~]# cd /usr/local/src/
[root@temp-test-01 src]# unzip k8s-v1.10.1-manual.zip																	#unpack the prepared installation bundle
[root@temp-test-01 src]# ll
total 579812
drwxr-xr-x 3 root root        24 Apr 23 20:19 k8s-v1.10.1-manual
-rw-r--r-- 1 root root 593725046 Jun 17 15:48 k8s-v1.10.1-manual.zip

[root@temp-test-01 src]# cd k8s-v1.10.1-manual/k8s-v1.10.1/
[root@temp-test-01 k8s-v1.10.1]# ll				#list the package contents
total 599096
-rw-r--r-- 1 root root   6595195 Mar 30  2016 cfssl-certinfo_linux-amd64
-rw-r--r-- 1 root root   2277873 Mar 30  2016 cfssljson_linux-amd64
-rw-r--r-- 1 root root  10376657 Mar 30  2016 cfssl_linux-amd64
-rw-r--r-- 1 root root  17108856 Apr 12 17:35 cni-plugins-amd64-v0.7.1.tgz
-rw-r--r-- 1 root root  10562874 Mar 30 01:58 etcd-v3.2.18-linux-amd64.tar.gz
-rw-r--r-- 1 root root   9706487 Jan 24 02:58 flannel-v0.10.0-linux-amd64.tar.gz
drwxr-xr-x 9 root root       146 Apr 12 23:25 kubernetes
-rw-r--r-- 1 root root  13344537 Apr 13 01:51 kubernetes-client-linux-amd64.tar.gz
-rw-r--r-- 1 root root 112427817 Apr 13 01:51 kubernetes-node-linux-amd64.tar.gz
-rw-r--r-- 1 root root 428337777 Apr 13 01:51 kubernetes-server-linux-amd64.tar.gz
-rw-r--r-- 1 root root   2716855 Apr 13 01:51 kubernetes.tar.gz

##########Unpack the Kubernetes tarballs
[root@temp-test-01 k8s-v1.10.1]# tar xf kubernetes-client-linux-amd64.tar.gz
[root@temp-test-01 k8s-v1.10.1]# tar xf kubernetes-node-linux-amd64.tar.gz
[root@temp-test-01 k8s-v1.10.1]# tar xf kubernetes-server-linux-amd64.tar.gz
[root@temp-test-01 k8s-v1.10.1]# cd kubernetes

##########Inspect the extracted binaries
[root@temp-test-01 kubernetes]# ll server/bin/
total 2016824
-rwxr-xr-x 1 root root  58245918 Apr 12 23:16 apiextensions-apiserver
-rwxr-xr-x 1 root root 131966577 Apr 12 23:16 cloud-controller-manager
-rw-r--r-- 1 root root         8 Apr 12 23:16 cloud-controller-manager.docker_tag
-rw-r--r-- 1 root root 133343232 Apr 12 23:16 cloud-controller-manager.tar
-rwxr-xr-x 1 root root 266422752 Apr 12 23:16 hyperkube
-rwxr-xr-x 1 root root 156493057 Apr 12 23:16 kubeadm
-rwxr-xr-x 1 root root  57010027 Apr 12 23:16 kube-aggregator
-rw-r--r-- 1 root root         8 Apr 12 23:16 kube-aggregator.docker_tag
-rw-r--r-- 1 root root  58386432 Apr 12 23:16 kube-aggregator.tar
-rwxr-xr-x 1 root root 223882554 Apr 12 23:16 kube-apiserver
-rw-r--r-- 1 root root         8 Apr 12 23:16 kube-apiserver.docker_tag
-rw-r--r-- 1 root root 225259008 Apr 12 23:16 kube-apiserver.tar
-rwxr-xr-x 1 root root 146695941 Apr 12 23:16 kube-controller-manager
-rw-r--r-- 1 root root         8 Apr 12 23:16 kube-controller-manager.docker_tag
-rw-r--r-- 1 root root 148072448 Apr 12 23:16 kube-controller-manager.tar
-rwxr-xr-x 1 root root  54277604 Apr 12 23:17 kubectl
-rwxr-xr-x 1 root root 152789584 Apr 12 23:16 kubelet
-rwxr-xr-x 1 root root  51343381 Apr 12 23:16 kube-proxy
-rw-r--r-- 1 root root         8 Apr 12 23:16 kube-proxy.docker_tag
-rw-r--r-- 1 root root  98919936 Apr 12 23:16 kube-proxy.tar
-rwxr-xr-x 1 root root  49254848 Apr 12 23:16 kube-scheduler
-rw-r--r-- 1 root root         8 Apr 12 23:16 kube-scheduler.docker_tag
-rw-r--r-- 1 root root  50631168 Apr 12 23:16 kube-scheduler.tar
-rwxr-xr-x 1 root root   2165591 Apr 12 23:16 mounter
[root@temp-test-01 kubernetes]# ll client/bin/
total 53008
-rwxr-xr-x 1 root root 54277604 Apr 12 23:16 kubectl
[root@temp-test-01 kubernetes]# ll node/bin/
total 405192
-rwxr-xr-x 1 root root 156493057 Apr 12 23:16 kubeadm
-rwxr-xr-x 1 root root  54277604 Apr 12 23:16 kubectl
-rwxr-xr-x 1 root root 152789584 Apr 12 23:16 kubelet
-rwxr-xr-x 1 root root  51343381 Apr 12 23:16 kube-proxy

########## Certificate configuration ##########

##########Prepare the cfssl certificate tools
[root@temp-test-01 k8s-v1.10.1]# chmod +x cfssl*
[root@temp-test-01 k8s-v1.10.1]# mv cfssl-certinfo_linux-amd64 /opt/kubernetes/bin/cfssl-certinfo
[root@temp-test-01 k8s-v1.10.1]# mv cfssljson_linux-amd64  /opt/kubernetes/bin/cfssljson
[root@temp-test-01 k8s-v1.10.1]# mv cfssl_linux-amd64  /opt/kubernetes/bin/cfssl
[root@temp-test-01 k8s-v1.10.1]# ll /opt/kubernetes/bin/
total 18808
-rwxr-xr-x 1 root root 10376657 Mar 30  2016 cfssl
-rwxr-xr-x 1 root root  6595195 Mar 30  2016 cfssl-certinfo
-rwxr-xr-x 1 root root  2277873 Mar 30  2016 cfssljson

##########Set up SSH key authentication		#makes copying files to the other nodes easier
[root@temp-test-01 k8s-v1.10.1]# ssh-keygen -t rsa
Generating public/private rsa key pair.
Enter file in which to save the key (/root/.ssh/id_rsa): 
Created directory '/root/.ssh'.
Enter passphrase (empty for no passphrase): 
Enter same passphrase again: 
Your identification has been saved in /root/.ssh/id_rsa.
Your public key has been saved in /root/.ssh/id_rsa.pub.
The key fingerprint is:
54:06:48:6b:5a:79:8c:61:77:05:fd:a1:23:05:b8:b2 root@temp-test-01
The key's randomart image is:
+--[ RSA 2048]----+
|     .+.oo*=.    |
|     ..B.+  o .  |
|      * +. . o . |
|     +.o. . o .  |
|    .  oS  . .   |
|      E          |
|                 |
|                 |
|                 |
+-----------------+

[root@temp-test-01 k8s-v1.10.1]# ssh-copy-id temp-test-01																#copy the public key to the remote host
The authenticity of host 'temp-test-01 (172.16.169.91)' can't be established.
ECDSA key fingerprint is 28:90:1c:b0:10:be:d1:bb:d7:f6:e5:16:d7:dc:0d:c8.
Are you sure you want to continue connecting (yes/no)? yes
/usr/bin/ssh-copy-id: INFO: attempting to log in with the new key(s), to filter out any that are already installed
/usr/bin/ssh-copy-id: INFO: 1 key(s) remain to be installed -- if you are prompted now it is to install the new keys
root@temp-test-01's password: 

Number of key(s) added: 1

Now try logging into the machine, with:   "ssh 'temp-test-01'"
and check to make sure that only the key(s) you wanted were added.

[root@temp-test-01 k8s-v1.10.1]# ssh-copy-id temp-test-02
The authenticity of host 'temp-test-02 (172.16.169.92)' can't be established.
ECDSA key fingerprint is 0c:5e:7a:a7:af:64:a8:60:6e:f1:25:d2:43:b4:01:a0.
Are you sure you want to continue connecting (yes/no)? yes
/usr/bin/ssh-copy-id: INFO: attempting to log in with the new key(s), to filter out any that are already installed
/usr/bin/ssh-copy-id: INFO: 1 key(s) remain to be installed -- if you are prompted now it is to install the new keys
root@temp-test-02's password: 

Number of key(s) added: 1

Now try logging into the machine, with:   "ssh 'temp-test-02'"
and check to make sure that only the key(s) you wanted were added.

[root@temp-test-01 k8s-v1.10.1]# ssh-copy-id temp-test-03
The authenticity of host 'temp-test-03 (172.16.169.93)' can't be established.
ECDSA key fingerprint is a4:9a:92:bf:00:1f:17:5a:6c:7d:2f:dd:9c:19:b3:1e.
Are you sure you want to continue connecting (yes/no)? yes
/usr/bin/ssh-copy-id: INFO: attempting to log in with the new key(s), to filter out any that are already installed
/usr/bin/ssh-copy-id: INFO: 1 key(s) remain to be installed -- if you are prompted now it is to install the new keys
root@temp-test-03's password: 
Permission denied, please try again.
root@temp-test-03's password: 

Number of key(s) added: 1

Now try logging into the machine, with:   "ssh 'temp-test-03'"
and check to make sure that only the key(s) you wanted were added.

##########Copy the cfssl tools to the other nodes
[root@temp-test-01 k8s-v1.10.1]# scp /opt/kubernetes/bin/cfssl* root@172.16.169.92:/opt/kubernetes/bin
cfssl                                                                                                100%   10MB   9.9MB/s   00:00    
cfssl-certinfo                                                                                       100% 6441KB   6.3MB/s   00:00    
cfssljson                                                                                            100% 2224KB   2.2MB/s   00:00    
[root@temp-test-01 k8s-v1.10.1]# scp /opt/kubernetes/bin/cfssl* root@172.16.169.93:/opt/kubernetes/bin
cfssl                                                                                                100%   10MB   9.9MB/s   00:00    
cfssl-certinfo                                                                                       100% 6441KB   6.3MB/s   00:00    
cfssljson                                                                                            100% 2224KB   2.2MB/s   00:00 

##########Create a working directory for the temporary certificate files
[root@temp-test-01 k8s-v1.10.1]# cd /usr/local/src/
[root@temp-test-01 src]# mkdir ssl && cd ssl

##########Create the JSON config used to generate the CA
[root@temp-test-01 ssl]# vim ca-config.json
[root@temp-test-01 ssl]# cat ca-config.json
{
  "signing": {
    "default": {
      "expiry": "8760h"
    },
    "profiles": {
      "kubernetes": {
        "usages": [
            "signing",
            "key encipherment",
            "server auth",
            "client auth"
        ],
        "expiry": "8760h"
      }
    }
  }
}

##########Create the JSON config for the CA certificate signing request (CSR)
[root@temp-test-01 ssl]# vim ca-csr.json
[root@temp-test-01 ssl]# cat ca-csr.json
{
  "CN": "kubernetes",
  "key": {
    "algo": "rsa",
    "size": 2048
  },
  "names": [
    {
      "C": "CN",
      "ST": "Guangzhou",
      "L": "Guangzhou",
      "O": "k8s",
      "OU": "System"
    }
  ]
}

##########Generate the CA certificate (ca.pem) and key (ca-key.pem)
[root@temp-test-01 ssl]# cfssl gencert -initca ca-csr.json | cfssljson -bare ca
2018/06/17 16:36:30 [INFO] generating a new CA key and certificate from CSR
2018/06/17 16:36:30 [INFO] generate received request
2018/06/17 16:36:30 [INFO] received CSR
2018/06/17 16:36:30 [INFO] generating key: rsa-2048
2018/06/17 16:36:30 [INFO] encoded CSR
2018/06/17 16:36:30 [INFO] signed certificate with serial number 387862013045470101121631003181506095233148832466

[root@temp-test-01 ssl]# ll
total 20
-rw-r--r-- 1 root root  290 Jun 17 16:34 ca-config.json
-rw-r--r-- 1 root root 1005 Jun 17 16:36 ca.csr
-rw-r--r-- 1 root root  212 Jun 17 16:35 ca-csr.json
-rw------- 1 root root 1675 Jun 17 16:36 ca-key.pem
-rw-r--r-- 1 root root 1371 Jun 17 16:36 ca.pem
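
(Optional) Before distributing the CA, the generated certificate can be inspected with openssl, or with the cfssl-certinfo tool copied earlier; this extra check is not part of the original notes:
[root@temp-test-01 ssl]# openssl x509 -in ca.pem -noout -subject -dates
[root@temp-test-01 ssl]# cfssl-certinfo -cert ca.pem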

##########Distribute the certificates
[root@temp-test-01 ssl]# cp ca.csr ca.pem ca-key.pem ca-config.json /opt/kubernetes/ssl
[root@temp-test-01 ssl]# ll /opt/kubernetes/ssl
total 16
-rw-r--r-- 1 root root  290 Jun 17 16:38 ca-config.json
-rw-r--r-- 1 root root 1005 Jun 17 16:38 ca.csr
-rw------- 1 root root 1675 Jun 17 16:38 ca-key.pem
-rw-r--r-- 1 root root 1371 Jun 17 16:38 ca.pem
[root@temp-test-01 ssl]# scp ca.csr ca.pem ca-key.pem ca-config.json root@172.16.169.92:/opt/kubernetes/ssl 
ca.csr                                                                                               100% 1005     1.0KB/s   00:00    
ca.pem                                                                                               100% 1371     1.3KB/s   00:00    
ca-key.pem                                                                                           100% 1675     1.6KB/s   00:00    
ca-config.json                                                                                       100%  290     0.3KB/s   00:00    
[root@temp-test-01 ssl]# scp ca.csr ca.pem ca-key.pem ca-config.json root@172.16.169.93:/opt/kubernetes/ssl 
ca.csr                                                                                               100% 1005     1.0KB/s   00:00    
ca.pem                                                                                               100% 1371     1.3KB/s   00:00    
ca-key.pem                                                                                           100% 1675     1.6KB/s   00:00    
ca-config.json                                                                                       100%  290     0.3KB/s   00:00    

######################### Certificate preparation is complete at this point. Any additional nodes also need these certificates #########################

########## etcd cluster installation ##########

[root@temp-test-01 k8s-v1.10.1]# pwd
/usr/local/src/k8s-v1.10.1-manual/k8s-v1.10.1
[root@temp-test-01 k8s-v1.10.1]# ll
total 580292
-rw-r--r--  1 root root  17108856 Apr 12 17:35 cni-plugins-amd64-v0.7.1.tgz
-rw-r--r--  1 root root  10562874 Mar 30 01:58 etcd-v3.2.18-linux-amd64.tar.gz
-rw-r--r--  1 root root   9706487 Jan 24 02:58 flannel-v0.10.0-linux-amd64.tar.gz
drwxr-xr-x 11 root root      4096 Apr 12 23:17 kubernetes
-rw-r--r--  1 root root  13344537 Apr 13 01:51 kubernetes-client-linux-amd64.tar.gz
-rw-r--r--  1 root root 112427817 Apr 13 01:51 kubernetes-node-linux-amd64.tar.gz
-rw-r--r--  1 root root 428337777 Apr 13 01:51 kubernetes-server-linux-amd64.tar.gz
-rw-r--r--  1 root root   2716855 Apr 13 01:51 kubernetes.tar.gz

[root@temp-test-01 k8s-v1.10.1]# tar xf etcd-v3.2.18-linux-amd64.tar.gz 
[root@temp-test-01 k8s-v1.10.1]# ll etcd-v3.2.18-linux-amd64
total 32368
drwxr-xr-x 11 478493 89939     4096 Mar 30 01:49 Documentation
-rwxr-xr-x  1 478493 89939 17837888 Mar 30 01:49 etcd
-rwxr-xr-x  1 478493 89939 15246720 Mar 30 01:49 etcdctl
-rw-r--r--  1 478493 89939    34246 Mar 30 01:49 README-etcdctl.md
-rw-r--r--  1 478493 89939     5801 Mar 30 01:49 README.md
-rw-r--r--  1 478493 89939     7855 Mar 30 01:49 READMEv2-etcdctl.md

##########Prepare the etcd binaries
[root@temp-test-01 k8s-v1.10.1]# cd etcd-v3.2.18-linux-amd64
[root@temp-test-01 etcd-v3.2.18-linux-amd64]# ll
total 32368
drwxr-xr-x 11 478493 89939     4096 Mar 30 01:49 Documentation
-rwxr-xr-x  1 478493 89939 17837888 Mar 30 01:49 etcd
-rwxr-xr-x  1 478493 89939 15246720 Mar 30 01:49 etcdctl
-rw-r--r--  1 478493 89939    34246 Mar 30 01:49 README-etcdctl.md
-rw-r--r--  1 478493 89939     5801 Mar 30 01:49 README.md
-rw-r--r--  1 478493 89939     7855 Mar 30 01:49 READMEv2-etcdctl.md
[root@temp-test-01 etcd-v3.2.18-linux-amd64]# cp etcd etcdctl /opt/kubernetes/bin/ 
[root@temp-test-01 etcd-v3.2.18-linux-amd64]# scp etcd etcdctl root@172.16.169.92:/opt/kubernetes/bin/
etcd                                                                                                 100%   17MB  17.0MB/s   00:00    
etcdctl                                                                                              100%   15MB  14.5MB/s   00:00    
[root@temp-test-01 etcd-v3.2.18-linux-amd64]# scp etcd etcdctl root@172.16.169.93:/opt/kubernetes/bin/
etcd                                                                                                 100%   17MB  17.0MB/s   00:00    
etcdctl                                                                                              100%   15MB  14.5MB/s   00:00 
##########Create the etcd certificate signing request
[root@temp-test-01 etcd-v3.2.18-linux-amd64]# cd /usr/local/src/ssl/
[root@temp-test-01 ssl]# vim etcd-csr.json
[root@temp-test-01 ssl]# cat etcd-csr.json
{
  "CN": "etcd",
  "hosts": [
    "127.0.0.1",
    "172.16.169.91",
    "172.16.169.92",
    "172.16.169.93"
  ],
  "key": {
    "algo": "rsa",
    "size": 2048
  },
  "names": [
    {
      "C": "CN",
      "ST": "Guangzhou",
      "L": "Guangzhou",
      "O": "k8s",
      "OU": "System"
    }
  ]
}

##########Generate the etcd certificate and private key
[root@temp-test-01 ssl]# cfssl gencert -ca=/opt/kubernetes/ssl/ca.pem \
-ca-key=/opt/kubernetes/ssl/ca-key.pem \
-config=/opt/kubernetes/ssl/ca-config.json \
-profile=kubernetes etcd-csr.json | cfssljson -bare etcd
2018/06/17 16:56:20 [INFO] generate received request
2018/06/17 16:56:20 [INFO] received CSR
2018/06/17 16:56:20 [INFO] generating key: rsa-2048
2018/06/17 16:56:21 [INFO] encoded CSR
2018/06/17 16:56:21 [INFO] signed certificate with serial number 33293374521688580991219837617061113319230928338
2018/06/17 16:56:21 [WARNING] This certificate lacks a "hosts" field. This makes it unsuitable for
websites. For more information see the Baseline Requirements for the Issuance and Management
of Publicly-Trusted Certificates, v.1.1.6, from the CA/Browser Forum (https://cabforum.org);
specifically, section 10.2.3 ("Information Requirements").

[root@temp-test-01 ssl]# ls -l etcd*
-rw-r--r-- 1 root root 1070 Jun 17 16:56 etcd.csr
-rw-r--r-- 1 root root  303 Jun 17 16:55 etcd-csr.json
-rw------- 1 root root 1675 Jun 17 16:56 etcd-key.pem
-rw-r--r-- 1 root root 1444 Jun 17 16:56 etcd.pem

##########Copy the etcd certificates to /opt/kubernetes/ssl on all nodes
[root@temp-test-01 ssl]# cp etcd*.pem /opt/kubernetes/ssl
[root@temp-test-01 ssl]# scp etcd*.pem root@172.16.169.92:/opt/kubernetes/ssl
etcd-key.pem                                                                                         100% 1675     1.6KB/s   00:00    
etcd.pem                                                                                             100% 1444     1.4KB/s   00:00    
[root@temp-test-01 ssl]# scp etcd*.pem root@172.16.169.93:/opt/kubernetes/ssl
etcd-key.pem                                                                                         100% 1675     1.6KB/s   00:00    
etcd.pem                                                                                             100% 1444     1.4KB/s   00:00 
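
(Optional) To verify that the etcd certificate carries the IP SANs listed in etcd-csr.json, inspect it with openssl; the output should include 127.0.0.1 and the three node addresses:
[root@temp-test-01 ssl]# openssl x509 -in /opt/kubernetes/ssl/etcd.pem -noout -text | grep -A1 'Subject Alternative Name'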

##########Create the etcd configuration file
[root@temp-test-01 ssl]# vim /opt/kubernetes/cfg/etcd.conf
[root@temp-test-01 ssl]# cat /opt/kubernetes/cfg/etcd.conf
#[member]
ETCD_NAME="etcd-node1"
ETCD_DATA_DIR="/var/lib/etcd/default.etcd"
#ETCD_SNAPSHOT_COUNTER="10000"
#ETCD_HEARTBEAT_INTERVAL="100"
#ETCD_ELECTION_TIMEOUT="1000"
ETCD_LISTEN_PEER_URLS="https://172.16.169.91:2380"
ETCD_LISTEN_CLIENT_URLS="https://172.16.169.91:2379,https://127.0.0.1:2379"
#ETCD_MAX_SNAPSHOTS="5"
#ETCD_MAX_WALS="5"
#ETCD_CORS=""
#[cluster]
ETCD_INITIAL_ADVERTISE_PEER_URLS="https://172.16.169.91:2380"
# if you use different ETCD_NAME (e.g. test),
# set ETCD_INITIAL_CLUSTER value for this name, i.e. "test=http://..."
ETCD_INITIAL_CLUSTER="etcd-node1=https://172.16.169.91:2380,etcd-node2=https://172.16.169.92:2380,etcd-node3=https://172.16.169.93:2380"
ETCD_INITIAL_CLUSTER_STATE="new"
ETCD_INITIAL_CLUSTER_TOKEN="k8s-etcd-cluster"
ETCD_ADVERTISE_CLIENT_URLS="https://172.16.169.91:2379"
#[security]
CLIENT_CERT_AUTH="true"
ETCD_CA_FILE="/opt/kubernetes/ssl/ca.pem"
ETCD_CERT_FILE="/opt/kubernetes/ssl/etcd.pem"
ETCD_KEY_FILE="/opt/kubernetes/ssl/etcd-key.pem"
PEER_CLIENT_CERT_AUTH="true"
ETCD_PEER_CA_FILE="/opt/kubernetes/ssl/ca.pem"
ETCD_PEER_CERT_FILE="/opt/kubernetes/ssl/etcd.pem"
ETCD_PEER_KEY_FILE="/opt/kubernetes/ssl/etcd-key.pem"

##########Create the etcd systemd service
[root@temp-test-01 ssl]# vim /etc/systemd/system/etcd.service
[root@temp-test-01 ssl]# cat /etc/systemd/system/etcd.service
[Unit]
Description=Etcd Server
After=network.target

[Service]
WorkingDirectory=/var/lib/etcd
EnvironmentFile=-/opt/kubernetes/cfg/etcd.conf
# set GOMAXPROCS to number of processors
ExecStart=/bin/bash -c "GOMAXPROCS=$(nproc) /opt/kubernetes/bin/etcd"
Type=notify

[Install]
WantedBy=multi-user.target

[root@temp-test-01 ssl]# scp /opt/kubernetes/cfg/etcd.conf 172.16.169.92:/opt/kubernetes/cfg/
etcd.conf                                                                                            100% 1170     1.1KB/s   00:00    
[root@temp-test-01 ssl]# scp /etc/systemd/system/etcd.service 172.16.169.92:/etc/systemd/system/
etcd.service                                                                                         100%  314     0.3KB/s   00:00    
[root@temp-test-01 ssl]# scp /opt/kubernetes/cfg/etcd.conf 172.16.169.93:/opt/kubernetes/cfg/
etcd.conf                                                                                            100% 1170     1.1KB/s   00:00    
[root@temp-test-01 ssl]# scp /etc/systemd/system/etcd.service 172.16.169.93:/etc/systemd/system/
etcd.service                                                                                         100%  314     0.3KB/s   00:00    

##########Create the etcd data directory (on all nodes)
[root@temp-test-01 ssl]# mkdir /var/lib/etcd

##########Start etcd
[root@temp-test-01 ssl]# systemctl daemon-reload										#reload systemd unit files
[root@temp-test-01 ssl]# systemctl enable etcd
Created symlink from /etc/systemd/system/multi-user.target.wants/etcd.service to /etc/systemd/system/etcd.service.
[root@temp-test-01 ssl]# systemctl start etcd											#if the other members are not deployed yet, do not start; wait until all nodes are ready and start them together

##########Check the service status
[root@temp-test-01 ssl]# systemctl status etcd
● etcd.service - Etcd Server
   Loaded: loaded (/etc/systemd/system/etcd.service; enabled; vendor preset: disabled)
   Active: active (running) since Sun 2018-06-17 17:23:08 CST; 49s ago
 Main PID: 21266 (etcd)
   Memory: 30.4M
   CGroup: /system.slice/etcd.service
           └─21266 /opt/kubernetes/bin/etcd
Jun 17 17:23:08 temp-test-01 etcd[21266]: set the initial cluster version to 3.0
Jun 17 17:23:08 temp-test-01 etcd[21266]: enabled capabilities for version 3.0
Jun 17 17:23:09 temp-test-01 etcd[21266]: peer b9aa3e8f97fb3da4 became active
Jun 17 17:23:09 temp-test-01 etcd[21266]: established a TCP streaming connection with peer b9aa3e8f97fb3da4 (stream MsgApp v2 reader)
Jun 17 17:23:09 temp-test-01 etcd[21266]: established a TCP streaming connection with peer b9aa3e8f97fb3da4 (stream Message reader)
Jun 17 17:23:09 temp-test-01 etcd[21266]: established a TCP streaming connection with peer b9aa3e8f97fb3da4 (stream MsgApp v2 writer)
Jun 17 17:23:09 temp-test-01 etcd[21266]: established a TCP streaming connection with peer b9aa3e8f97fb3da4 (stream Message writer)
Jun 17 17:23:12 temp-test-01 etcd[21266]: updating the cluster version from 3.0 to 3.2
Jun 17 17:23:12 temp-test-01 etcd[21266]: updated the cluster version from 3.0 to 3.2
Jun 17 17:23:12 temp-test-01 etcd[21266]: enabled capabilities for version 3.2

##########Verify that ports 2379 and 2380 are listening
[root@temp-test-01 ssl]# ss -tnl
State       Recv-Q Send-Q                      Local Address:Port                                     Peer Address:Port              
LISTEN      0      128                         172.16.169.91:2379                                                *:*                  
LISTEN      0      128                             127.0.0.1:2379                                                *:*                  
LISTEN      0      128                         172.16.169.91:2380                                                *:*                  
LISTEN      0      128                                     *:22                                                  *:*                  
LISTEN      0      100                             127.0.0.1:25                                                  *:*                  
LISTEN      0      128                                    :::22                                                 :::*                  
LISTEN      0      100                                   ::1:25                                                 :::*          

##########node-01##########
[root@temp-test-02 ~]# mkdir -p /opt/kubernetes/{cfg,bin,ssl,log}
[root@temp-test-02 ~]# sed -i s#'PATH=$PATH:$HOME/bin'#'PATH=$PATH:$HOME/bin:/opt/kubernetes/bin'#g .bash_profile
[root@temp-test-02 ~]# source .bash_profile

##########Edit the etcd configuration file
[root@temp-test-02 ~]# vim /opt/kubernetes/cfg/etcd.conf
[root@temp-test-02 ~]# cat /opt/kubernetes/cfg/etcd.conf
#[member]
ETCD_NAME="etcd-node2"																			#changed for this node
ETCD_DATA_DIR="/var/lib/etcd/default.etcd"
#ETCD_SNAPSHOT_COUNTER="10000"
#ETCD_HEARTBEAT_INTERVAL="100"
#ETCD_ELECTION_TIMEOUT="1000"
ETCD_LISTEN_PEER_URLS="https://172.16.169.92:2380"												#changed for this node
ETCD_LISTEN_CLIENT_URLS="https://172.16.169.92:2379,https://127.0.0.1:2379"						#changed for this node
#ETCD_MAX_SNAPSHOTS="5"
#ETCD_MAX_WALS="5"
#ETCD_CORS=""
#[cluster]
ETCD_INITIAL_ADVERTISE_PEER_URLS="https://172.16.169.92:2380"									#changed for this node
# if you use different ETCD_NAME (e.g. test),
# set ETCD_INITIAL_CLUSTER value for this name, i.e. "test=http://..."
ETCD_INITIAL_CLUSTER="etcd-node1=https://172.16.169.91:2380,etcd-node2=https://172.16.169.92:2380,etcd-node3=https://172.16.169.93:2380"
ETCD_INITIAL_CLUSTER_STATE="new"
ETCD_INITIAL_CLUSTER_TOKEN="k8s-etcd-cluster"
ETCD_ADVERTISE_CLIENT_URLS="https://172.16.169.92:2379"											#changed for this node
#[security]
CLIENT_CERT_AUTH="true"
ETCD_CA_FILE="/opt/kubernetes/ssl/ca.pem"
ETCD_CERT_FILE="/opt/kubernetes/ssl/etcd.pem"
ETCD_KEY_FILE="/opt/kubernetes/ssl/etcd-key.pem"
PEER_CLIENT_CERT_AUTH="true"
ETCD_PEER_CA_FILE="/opt/kubernetes/ssl/ca.pem"
ETCD_PEER_CERT_FILE="/opt/kubernetes/ssl/etcd.pem"
ETCD_PEER_KEY_FILE="/opt/kubernetes/ssl/etcd-key.pem"

##########Create the etcd data directory (on all nodes)
[root@temp-test-02 ~]# mkdir /var/lib/etcd

##########Start etcd
[root@temp-test-02 ~]# systemctl daemon-reload
[root@temp-test-02 ~]# systemctl start etcd
[root@temp-test-02 ~]# systemctl enable etcd
Created symlink from /etc/systemd/system/multi-user.target.wants/etcd.service to /etc/systemd/system/etcd.service.

##########Check the service status
[root@temp-test-02 ~]# systemctl status etcd
● etcd.service - Etcd Server
   Loaded: loaded (/etc/systemd/system/etcd.service; enabled; vendor preset: disabled)
   Active: active (running) since Sun 2018-06-17 17:23:14 CST; 57s ago
 Main PID: 20979 (etcd)
   Memory: 13.9M
   CGroup: /system.slice/etcd.service
           └─20979 /opt/kubernetes/bin/etcd
Jun 17 17:23:15 temp-test-02 etcd[20979]: established a TCP streaming connection with peer b9aa3e8f97fb3da4 (stream MsgApp v2 reader)
Jun 17 17:23:15 temp-test-02 etcd[20979]: established a TCP streaming connection with peer b9aa3e8f97fb3da4 (stream MsgApp v2 writer)
Jun 17 17:23:15 temp-test-02 etcd[20979]: established a TCP streaming connection with peer b9aa3e8f97fb3da4 (stream Message reader)
Jun 17 17:23:15 temp-test-02 etcd[20979]: c97ced80d0022441 initialzed peer connection; fast-forwarding 8 ticks (election tick...peer(s)
Jun 17 17:23:18 temp-test-02 etcd[20979]: the clock difference against peer b9aa3e8f97fb3da4 is too high [1.31666398s > 1s]
Jun 17 17:23:18 temp-test-02 etcd[20979]: the clock difference against peer bf5ce1ee0f39370f is too high [5.417104984s > 1s]
Jun 17 17:23:18 temp-test-02 etcd[20979]: updated the cluster version from 3.0 to 3.2
Jun 17 17:23:18 temp-test-02 etcd[20979]: enabled capabilities for version 3.2
Jun 17 17:23:48 temp-test-02 etcd[20979]: the clock difference against peer b9aa3e8f97fb3da4 is too high [1.317042896s > 1s]
Jun 17 17:23:48 temp-test-02 etcd[20979]: the clock difference against peer bf5ce1ee0f39370f is too high [5.417683975s > 1s]
Hint: Some lines were ellipsized, use -l to show in full.

##########Verify that ports 2379 and 2380 are listening
[root@temp-test-02 ~]# ss -tnl
State       Recv-Q Send-Q                      Local Address:Port                                     Peer Address:Port              
LISTEN      0      128                         172.16.169.92:2379                                                *:*                  
LISTEN      0      128                             127.0.0.1:2379                                                *:*                  
LISTEN      0      128                         172.16.169.92:2380                                                *:*                  
LISTEN      0      128                                     *:22                                                  *:*                  
LISTEN      0      100                             127.0.0.1:25                                                  *:*                  
LISTEN      0      128                                    :::22                                                 :::*                  
LISTEN      0      100                                   ::1:25                                                 :::*  

##########node-02##########
[root@temp-test-03 ~]# mkdir -p /opt/kubernetes/{cfg,bin,ssl,log}
[root@temp-test-03 ~]# sed -i s#'PATH=$PATH:$HOME/bin'#'PATH=$PATH:$HOME/bin:/opt/kubernetes/bin'#g .bash_profile
[root@temp-test-03 ~]# source .bash_profile

##########Edit the etcd configuration file
[root@temp-test-03 ~]# vim /opt/kubernetes/cfg/etcd.conf
[root@temp-test-03 ~]# cat /opt/kubernetes/cfg/etcd.conf
#[member]
ETCD_NAME="etcd-node3"
ETCD_DATA_DIR="/var/lib/etcd/default.etcd"
#ETCD_SNAPSHOT_COUNTER="10000"
#ETCD_HEARTBEAT_INTERVAL="100"
#ETCD_ELECTION_TIMEOUT="1000"
ETCD_LISTEN_PEER_URLS="https://172.16.169.93:2380"
ETCD_LISTEN_CLIENT_URLS="https://172.16.169.93:2379,https://127.0.0.1:2379"
#ETCD_MAX_SNAPSHOTS="5"
#ETCD_MAX_WALS="5"
#ETCD_CORS=""
#[cluster]
ETCD_INITIAL_ADVERTISE_PEER_URLS="https://172.16.169.93:2380"
# if you use different ETCD_NAME (e.g. test),
# set ETCD_INITIAL_CLUSTER value for this name, i.e. "test=http://..."
ETCD_INITIAL_CLUSTER="etcd-node1=https://172.16.169.91:2380,etcd-node2=https://172.16.169.92:2380,etcd-node3=https://172.16.169.93:2380"
ETCD_INITIAL_CLUSTER_STATE="new"
ETCD_INITIAL_CLUSTER_TOKEN="k8s-etcd-cluster"
ETCD_ADVERTISE_CLIENT_URLS="https://172.16.169.93:2379"
#[security]
CLIENT_CERT_AUTH="true"
ETCD_CA_FILE="/opt/kubernetes/ssl/ca.pem"
ETCD_CERT_FILE="/opt/kubernetes/ssl/etcd.pem"
ETCD_KEY_FILE="/opt/kubernetes/ssl/etcd-key.pem"
PEER_CLIENT_CERT_AUTH="true"
ETCD_PEER_CA_FILE="/opt/kubernetes/ssl/ca.pem"
ETCD_PEER_CERT_FILE="/opt/kubernetes/ssl/etcd.pem"
ETCD_PEER_KEY_FILE="/opt/kubernetes/ssl/etcd-key.pem"

##########Create the etcd data directory (on all nodes)
[root@temp-test-03 ~]# mkdir /var/lib/etcd

##########Start etcd
[root@temp-test-03 ~]# systemctl daemon-reload
[root@temp-test-03 ~]# systemctl enable etcd
Created symlink from /etc/systemd/system/multi-user.target.wants/etcd.service to /etc/systemd/system/etcd.service.
[root@temp-test-03 ~]# systemctl start etcd

##########Check the service status
[root@temp-test-03 ~]# systemctl status etcd
● etcd.service - Etcd Server
   Loaded: loaded (/etc/systemd/system/etcd.service; enabled; vendor preset: disabled)
   Active: active (running) since Sun 2018-06-17 17:23:13 CST; 58s ago
 Main PID: 18787 (etcd)
   Memory: 13.5M
   CGroup: /system.slice/etcd.service
           └─18787 /opt/kubernetes/bin/etcd
Jun 17 17:23:13 temp-test-03 etcd[18787]: serving client requests on 127.0.0.1:2379
Jun 17 17:23:13 temp-test-03 etcd[18787]: published {Name:etcd-node3 ClientURLs:[https://172.16.169.93:2379]} to cluster 6270...42b4f15
Jun 17 17:23:13 temp-test-03 etcd[18787]: ready to serve client requests
Jun 17 17:23:13 temp-test-03 etcd[18787]: serving client requests on 172.16.169.93:2379
Jun 17 17:23:13 temp-test-03 etcd[18787]: established a TCP streaming connection with peer c97ced80d0022441 (stream Message writer)
Jun 17 17:23:13 temp-test-03 etcd[18787]: b9aa3e8f97fb3da4 initialzed peer connection; fast-forwarding 8 ticks (election tick...peer(s)
Jun 17 17:23:17 temp-test-03 etcd[18787]: updated the cluster version from 3.0 to 3.2
Jun 17 17:23:17 temp-test-03 etcd[18787]: enabled capabilities for version 3.2
Jun 17 17:23:18 temp-test-03 etcd[18787]: the clock difference against peer bf5ce1ee0f39370f is too high [4.099884747s > 1s]
Jun 17 17:23:48 temp-test-03 etcd[18787]: the clock difference against peer bf5ce1ee0f39370f is too high [4.100250228s > 1s]
Hint: Some lines were ellipsized, use -l to show in full.

##########Verify that ports 2379 and 2380 are listening
[root@temp-test-03 ~]# ss -tnl
State       Recv-Q Send-Q                      Local Address:Port                                     Peer Address:Port              
LISTEN      0      128                         172.16.169.93:2379                                                *:*                  
LISTEN      0      128                             127.0.0.1:2379                                                *:*                  
LISTEN      0      128                         172.16.169.93:2380                                                *:*                  
LISTEN      0      128                                     *:22                                                  *:*                  
LISTEN      0      100                             127.0.0.1:25                                                  *:*                  
LISTEN      0      128                                    :::22                                                 :::*                  
LISTEN      0      100                                   ::1:25                                                 :::*  

##########After etcd has been deployed and started on all nodes, check the cluster health
[root@temp-test-01 ssl]# etcdctl --endpoints=https://172.16.169.91:2379   --ca-file=/opt/kubernetes/ssl/ca.pem   --cert-file=/opt/kubernetes/ssl/etcd.pem   --key-file=/opt/kubernetes/ssl/etcd-key.pem cluster-health
member b9aa3e8f97fb3da4 is healthy: got healthy result from https://172.16.169.93:2379
member bf5ce1ee0f39370f is healthy: got healthy result from https://172.16.169.91:2379
member c97ced80d0022441 is healthy: got healthy result from https://172.16.169.92:2379
cluster is healthy
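
The cluster membership can also be listed with the same TLS flags (optional check, same etcdctl command set as above):
[root@temp-test-01 ssl]# etcdctl --endpoints=https://172.16.169.91:2379 \
  --ca-file=/opt/kubernetes/ssl/ca.pem \
  --cert-file=/opt/kubernetes/ssl/etcd.pem \
  --key-file=/opt/kubernetes/ssl/etcd-key.pem member list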

######################### etcd deployment is now complete #########################
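
Note: the "clock difference against peer ... is too high" warnings in the etcd logs above mean the three hosts' clocks have drifted apart, which etcd is sensitive to. A minimal fix, assuming the hosts can reach a public NTP source and chrony is acceptable, run on every node:
yum install -y chrony
systemctl enable chronyd
systemctl start chronyd
chronyc sources		#confirm a time source has been selected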

########## Deploy the master node: Kubernetes API Server ##########

##########Prepare the binaries
[root@temp-test-01 ~]# cd /usr/local/src/k8s-v1.10.1-manual/k8s-v1.10.1/kubernetes				#the tarballs were already extracted in an earlier step
[root@temp-test-01 kubernetes]# ll
total 29536
drwxr-xr-x  2 root root        6 Apr 12 23:16 addons
drwxr-xr-x  3 root root       29 Apr 12 23:16 client
drwxr-xr-x 13 root root     4096 Apr 12 23:24 cluster
drwxr-xr-x  7 root root      123 Apr 12 23:25 docs
drwxr-xr-x 34 root root     4096 Apr 12 23:25 examples
drwxr-xr-x  3 root root       16 Apr 12 23:24 hack
-rw-r--r--  1 root root 24710771 Apr 12 23:17 kubernetes-src.tar.gz
-rw-r--r--  1 root root  5516760 Apr 12 23:17 LICENSES
drwxr-xr-x  3 root root       16 Apr 12 23:16 node
-rw-r--r--  1 root root     3329 Apr 12 23:25 README.md
drwxr-xr-x  3 root root       63 Apr 12 23:16 server
drwxr-xr-x  3 root root       21 Apr 12 23:24 third_party
-rw-r--r--  1 root root        8 Apr 12 23:25 version

[root@temp-test-01 kubernetes]# cp server/bin/kube-apiserver /opt/kubernetes/bin/				#copy the binaries to the target directory
[root@temp-test-01 kubernetes]# cp server/bin/kube-controller-manager /opt/kubernetes/bin/
[root@temp-test-01 kubernetes]# cp server/bin/kube-scheduler /opt/kubernetes/bin/

##########Create the JSON config for the kubernetes CSR
[root@temp-test-01 ssl]# cd /usr/local/src/ssl
[root@temp-test-01 ssl]# vim kubernetes-csr.json
[root@temp-test-01 ssl]# cat kubernetes-csr.json 
{
  "CN": "kubernetes",
  "hosts": [
    "127.0.0.1",
    "172.16.169.91",
    "10.1.0.1",
    "kubernetes",
    "kubernetes.default",
    "kubernetes.default.svc",
    "kubernetes.default.svc.cluster",
    "kubernetes.default.svc.cluster.local"
  ],
  "key": {
    "algo": "rsa",
    "size": 2048
  },
  "names": [
    {
      "C": "CN",
      "ST": "Guangzhou",
      "L": "Guangzhou",
      "O": "k8s",
      "OU": "System"
    }
  ]
}

##########Generate the kubernetes certificate and private key
[root@temp-test-01 ssl]# cfssl gencert -ca=/opt/kubernetes/ssl/ca.pem \
-ca-key=/opt/kubernetes/ssl/ca-key.pem \
-config=/opt/kubernetes/ssl/ca-config.json \
-profile=kubernetes kubernetes-csr.json | cfssljson -bare kubernetes
2018/06/17 17:37:52 [INFO] generate received request
2018/06/17 17:37:52 [INFO] received CSR
2018/06/17 17:37:52 [INFO] generating key: rsa-2048
2018/06/17 17:37:52 [INFO] encoded CSR
2018/06/17 17:37:52 [INFO] signed certificate with serial number 125332322024329147655280305772724828108797380756
2018/06/17 17:37:52 [WARNING] This certificate lacks a "hosts" field. This makes it unsuitable for
websites. For more information see the Baseline Requirements for the Issuance and Management
of Publicly-Trusted Certificates, v.1.1.6, from the CA/Browser Forum (https://cabforum.org);
specifically, section 10.2.3 ("Information Requirements").

##########Distribute the certificates
[root@temp-test-01 ssl]# cp kubernetes*.pem /opt/kubernetes/ssl/
[root@temp-test-01 ssl]# scp kubernetes*.pem 172.16.169.92:/opt/kubernetes/ssl/
kubernetes-key.pem                                                                                   100% 1675     1.6KB/s   00:00    
kubernetes.pem                                                                                       100% 1623     1.6KB/s   00:00    
[root@temp-test-01 ssl]# scp kubernetes*.pem 172.16.169.93:/opt/kubernetes/ssl/
kubernetes-key.pem                                                                                   100% 1675     1.6KB/s   00:00    
kubernetes.pem                                                                                       100% 1623     1.6KB/s   00:00 

##########Create the client token file used by kube-apiserver
[root@temp-test-01 ssl]# head -c 16 /dev/urandom | od -An -t x | tr -d ' '							#if you regenerate the token, use the new value consistently in the steps below
ad6d5bb607a186796d8861557df0d17f
[root@temp-test-01 ssl]# vim /opt/kubernetes/ssl/bootstrap-token.csv
[root@temp-test-01 ssl]# cat /opt/kubernetes/ssl/bootstrap-token.csv
ad6d5bb607a186796d8861557df0d17f,kubelet-bootstrap,10001,"system:kubelet-bootstrap"					#make sure the token string is complete
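
To avoid a mismatch between the generated token and the CSV file, the two steps above can also be combined into one (a sketch using the same commands; the token value will differ on your system):
[root@temp-test-01 ssl]# TOKEN=$(head -c 16 /dev/urandom | od -An -t x | tr -d ' ')
[root@temp-test-01 ssl]# echo "${TOKEN},kubelet-bootstrap,10001,\"system:kubelet-bootstrap\"" > /opt/kubernetes/ssl/bootstrap-token.csv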

##########Create the basic username/password auth file
[root@temp-test-01 ssl]# vim /opt/kubernetes/ssl/basic-auth.csv
[root@temp-test-01 ssl]# cat /opt/kubernetes/ssl/basic-auth.csv
admin,admin,1
readonly,readonly,2

##########Deploy the Kubernetes API Server
[root@temp-test-01 ssl]# vim /usr/lib/systemd/system/kube-apiserver.service
[root@temp-test-01 ssl]# cat /usr/lib/systemd/system/kube-apiserver.service
[Unit]
Description=Kubernetes API Server
Documentation=https://github.com/GoogleCloudPlatform/kubernetes
After=network.target

[Service]
ExecStart=/opt/kubernetes/bin/kube-apiserver \
  --admission-control=NamespaceLifecycle,LimitRanger,ServiceAccount,DefaultStorageClass,ResourceQuota,NodeRestriction \
  --bind-address=172.16.169.91 \
  --insecure-bind-address=127.0.0.1 \
  --authorization-mode=Node,RBAC \
  --runtime-config=rbac.authorization.k8s.io/v1 \
  --kubelet-https=true \
  --anonymous-auth=false \
  --basic-auth-file=/opt/kubernetes/ssl/basic-auth.csv \
  --enable-bootstrap-token-auth \
  --token-auth-file=/opt/kubernetes/ssl/bootstrap-token.csv \
  --service-cluster-ip-range=10.1.0.0/16 \
  --service-node-port-range=20000-40000 \
  --tls-cert-file=/opt/kubernetes/ssl/kubernetes.pem \
  --tls-private-key-file=/opt/kubernetes/ssl/kubernetes-key.pem \
  --client-ca-file=/opt/kubernetes/ssl/ca.pem \
  --service-account-key-file=/opt/kubernetes/ssl/ca-key.pem \
  --etcd-cafile=/opt/kubernetes/ssl/ca.pem \
  --etcd-certfile=/opt/kubernetes/ssl/kubernetes.pem \
  --etcd-keyfile=/opt/kubernetes/ssl/kubernetes-key.pem \
  --etcd-servers=https://172.16.169.91:2379,https://172.16.169.92:2379,https://172.16.169.93:2379 \
  --enable-swagger-ui=true \
  --allow-privileged=true \
  --audit-log-maxage=30 \
  --audit-log-maxbackup=3 \
  --audit-log-maxsize=100 \
  --audit-log-path=/opt/kubernetes/log/api-audit.log \
  --event-ttl=1h \
  --v=2 \
  --logtostderr=false \
  --log-dir=/opt/kubernetes/log
Restart=on-failure
RestartSec=5
Type=notify
LimitNOFILE=65536

[Install]
WantedBy=multi-user.target

##########Start the API Server
[root@temp-test-01 ssl]# systemctl daemon-reload				#reload systemd unit files
[root@temp-test-01 ssl]# systemctl enable kube-apiserver
Created symlink from /etc/systemd/system/multi-user.target.wants/kube-apiserver.service to /usr/lib/systemd/system/kube-apiserver.service.
[root@temp-test-01 ssl]# systemctl start kube-apiserver

##########Check the status
[root@temp-test-01 ssl]# systemctl status kube-apiserver
● kube-apiserver.service - Kubernetes API Server
   Loaded: loaded (/usr/lib/systemd/system/kube-apiserver.service; enabled; vendor preset: disabled)
   Active: active (running) since Sun 2018-06-17 17:46:46 CST; 12s ago
     Docs: https://github.com/GoogleCloudPlatform/kubernetes
 Main PID: 21408 (kube-apiserver)
   Memory: 231.3M
   CGroup: /system.slice/kube-apiserver.service
           └─21408 /opt/kubernetes/bin/kube-apiserver --admission-control=NamespaceLifecycle,LimitRanger,ServiceAccount,DefaultStora...

Jun 17 17:46:39 temp-test-01 systemd[1]: Starting Kubernetes API Server...
Jun 17 17:46:39 temp-test-01 kube-apiserver[21408]: Flag --admission-control has been deprecated, Use --enable-admission-plugi...rsion.
Jun 17 17:46:39 temp-test-01 kube-apiserver[21408]: Flag --insecure-bind-address has been deprecated, This flag will be remove...rsion.
Jun 17 17:46:40 temp-test-01 kube-apiserver[21408]: [restful] 2018/06/17 17:46:40 log.go:33: [restful/swagger] listing is avai...gerapi
Jun 17 17:46:40 temp-test-01 kube-apiserver[21408]: [restful] 2018/06/17 17:46:40 log.go:33: [restful/swagger] https://172.16....er-ui/
Jun 17 17:46:42 temp-test-01 kube-apiserver[21408]: [restful] 2018/06/17 17:46:42 log.go:33: [restful/swagger] listing is avai...gerapi
Jun 17 17:46:42 temp-test-01 kube-apiserver[21408]: [restful] 2018/06/17 17:46:42 log.go:33: [restful/swagger] https://172.16....er-ui/
Jun 17 17:46:46 temp-test-01 systemd[1]: Started Kubernetes API Server.
Hint: Some lines were ellipsized, use -l to show in full.

##########Verify that ports 6443 and 8080 are listening
[root@temp-test-01 ssl]# ss -tnl
State       Recv-Q Send-Q                      Local Address:Port                                     Peer Address:Port              
LISTEN      0      128                         172.16.169.91:6443                                                *:*                  
LISTEN      0      128                         172.16.169.91:2379                                                *:*                  
LISTEN      0      128                             127.0.0.1:2379                                                *:*                  
LISTEN      0      128                         172.16.169.91:2380                                                *:*                  
LISTEN      0      128                             127.0.0.1:8080                                                *:*                  
LISTEN      0      128                                     *:22                                                  *:*                  
LISTEN      0      100                             127.0.0.1:25                                                  *:*                  
LISTEN      0      128                                    :::22                                                 :::*                  
LISTEN      0      100                                   ::1:25                                                 :::*                  
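
(Optional) The API server's health endpoint can be probed directly on the insecure port, which is bound to 127.0.0.1 by --insecure-bind-address above; a healthy server answers with "ok":
[root@temp-test-01 ssl]# curl http://127.0.0.1:8080/healthz
ok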

##########Deploy the Controller Manager
[root@temp-test-01 ssl]# vim /usr/lib/systemd/system/kube-controller-manager.service
[root@temp-test-01 ssl]# cat /usr/lib/systemd/system/kube-controller-manager.service
[Unit]
Description=Kubernetes Controller Manager
Documentation=https://github.com/GoogleCloudPlatform/kubernetes

[Service]
ExecStart=/opt/kubernetes/bin/kube-controller-manager \
  --address=127.0.0.1 \
  --master=http://127.0.0.1:8080 \
  --allocate-node-cidrs=true \
  --service-cluster-ip-range=10.1.0.0/16 \
  --cluster-cidr=10.2.0.0/16 \
  --cluster-name=kubernetes \
  --cluster-signing-cert-file=/opt/kubernetes/ssl/ca.pem \
  --cluster-signing-key-file=/opt/kubernetes/ssl/ca-key.pem \
  --service-account-private-key-file=/opt/kubernetes/ssl/ca-key.pem \
  --root-ca-file=/opt/kubernetes/ssl/ca.pem \
  --leader-elect=true \
  --v=2 \
  --logtostderr=false \
  --log-dir=/opt/kubernetes/log

Restart=on-failure
RestartSec=5

[Install]
WantedBy=multi-user.target


##########Start the Controller Manager
[root@temp-test-01 ssl]# systemctl daemon-reload
[root@temp-test-01 ssl]# systemctl enable kube-controller-manager
Created symlink from /etc/systemd/system/multi-user.target.wants/kube-controller-manager.service to /usr/lib/systemd/system/kube-controller-manager.service.
[root@temp-test-01 ssl]# systemctl start kube-controller-manager

##########Check the service status
[root@temp-test-01 ssl]# systemctl status kube-controller-manager
● kube-controller-manager.service - Kubernetes Controller Manager
   Loaded: loaded (/usr/lib/systemd/system/kube-controller-manager.service; enabled; vendor preset: disabled)
   Active: active (running) since Sun 2018-06-17 17:50:26 CST; 2min 26s ago
     Docs: https://github.com/GoogleCloudPlatform/kubernetes
 Main PID: 21463 (kube-controller)
   Memory: 33.3M
   CGroup: /system.slice/kube-controller-manager.service
           └─21463 /opt/kubernetes/bin/kube-controller-manager --address=127.0.0.1 --master=http://127.0.0.1:8080 --allocate-node-ci...

Jun 17 17:50:26 temp-test-01 systemd[1]: Started Kubernetes Controller Manager.
Jun 17 17:50:26 temp-test-01 systemd[1]: Starting Kubernetes Controller Manager...
Jun 17 17:50:37 temp-test-01 kube-controller-manager[21463]: E0617 17:50:37.056153   21463 core.go:75] Failed to start service ... fail
Jun 17 17:50:38 temp-test-01 kube-controller-manager[21463]: E0617 17:50:38.403420   21463 clusterroleaggregation_controller.go...again
Hint: Some lines were ellipsized, use -l to show in full.

##########Verify that port 10252 is listening
[root@temp-test-01 ssl]# ss -tnl
State       Recv-Q Send-Q                      Local Address:Port                                     Peer Address:Port              
LISTEN      0      128                         172.16.169.91:6443                                                *:*                  
LISTEN      0      128                         172.16.169.91:2379                                                *:*                  
LISTEN      0      128                             127.0.0.1:2379                                                *:*                  
LISTEN      0      128                             127.0.0.1:10252                                               *:*                  
LISTEN      0      128                         172.16.169.91:2380                                                *:*                  
LISTEN      0      128                             127.0.0.1:8080                                                *:*                  
LISTEN      0      128                                     *:22                                                  *:*                  
LISTEN      0      100                             127.0.0.1:25                                                  *:*                  
LISTEN      0      128                                    :::22                                                 :::*                  
LISTEN      0      100                                   ::1:25                                                 :::*                  

##########Deploy the Kubernetes Scheduler
[root@temp-test-01 ssl]# vim /usr/lib/systemd/system/kube-scheduler.service
[root@temp-test-01 ssl]# cat /usr/lib/systemd/system/kube-scheduler.service
[Unit]
Description=Kubernetes Scheduler
Documentation=https://github.com/GoogleCloudPlatform/kubernetes

[Service]
ExecStart=/opt/kubernetes/bin/kube-scheduler \
  --address=127.0.0.1 \
  --master=http://127.0.0.1:8080 \
  --leader-elect=true \
  --v=2 \
  --logtostderr=false \
  --log-dir=/opt/kubernetes/log

Restart=on-failure
RestartSec=5

[Install]
WantedBy=multi-user.target

##########Start the Scheduler
[root@temp-test-01 ssl]# systemctl daemon-reload
[root@temp-test-01 ssl]# systemctl enable kube-scheduler
Created symlink from /etc/systemd/system/multi-user.target.wants/kube-scheduler.service to /usr/lib/systemd/system/kube-scheduler.service.
[root@temp-test-01 ssl]# systemctl start kube-scheduler

##########Check the service status
[root@temp-test-01 ssl]# systemctl status kube-scheduler
● kube-scheduler.service - Kubernetes Scheduler
   Loaded: loaded (/usr/lib/systemd/system/kube-scheduler.service; enabled; vendor preset: disabled)
   Active: active (running) since Sun 2018-06-17 17:54:41 CST; 7s ago
     Docs: https://github.com/GoogleCloudPlatform/kubernetes
 Main PID: 21516 (kube-scheduler)
   Memory: 8.1M
   CGroup: /system.slice/kube-scheduler.service
           └─21516 /opt/kubernetes/bin/kube-scheduler --address=127.0.0.1 --master=http://127.0.0.1:8080 --leader-elect=true --v=2 -...

Jun 17 17:54:41 temp-test-01 systemd[1]: Started Kubernetes Scheduler.
Jun 17 17:54:41 temp-test-01 systemd[1]: Starting Kubernetes Scheduler...
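
(Optional) The controller manager and the scheduler expose local health endpoints (ports 10252 and 10251 respectively in this version); probing them is an extra check not in the original notes:
[root@temp-test-01 ssl]# curl http://127.0.0.1:10252/healthz
[root@temp-test-01 ssl]# curl http://127.0.0.1:10251/healthz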

##########Deploy the kubectl command-line tool

#1. Prepare the binary
[root@temp-test-01 bin]# cd /usr/local/src/k8s-v1.10.1-manual/k8s-v1.10.1/kubernetes/server/bin
[root@temp-test-01 bin]# cp kubectl /opt/kubernetes/bin/

#2. Create the admin certificate signing request
[root@temp-test-01 bin]# cd /usr/local/src/ssl/
[root@temp-test-01 ssl]# vim admin-csr.json
[root@temp-test-01 ssl]# cat admin-csr.json
{
  "CN": "admin",
  "hosts": [],
  "key": {
    "algo": "rsa",
    "size": 2048
  },
  "names": [
    {
      "C": "CN",
      "ST": "Guangzhou",
      "L": "Guangzhou",
      "O": "system:masters",
      "OU": "System"
    }
  ]
}

#3. Generate the admin certificate and private key:
[root@temp-test-01 ssl]# cfssl gencert -ca=/opt/kubernetes/ssl/ca.pem \
-ca-key=/opt/kubernetes/ssl/ca-key.pem \
-config=/opt/kubernetes/ssl/ca-config.json \
-profile=kubernetes admin-csr.json | cfssljson -bare admin
2018/06/17 17:58:19 [INFO] generate received request
2018/06/17 17:58:19 [INFO] received CSR
2018/06/17 17:58:19 [INFO] generating key: rsa-2048
2018/06/17 17:58:20 [INFO] encoded CSR
2018/06/17 17:58:20 [INFO] signed certificate with serial number 364394824851171656180531771892456201332369073973
2018/06/17 17:58:20 [WARNING] This certificate lacks a "hosts" field. This makes it unsuitable for
websites. For more information see the Baseline Requirements for the Issuance and Management
of Publicly-Trusted Certificates, v.1.1.6, from the CA/Browser Forum (https://cabforum.org);
specifically, section 10.2.3 ("Information Requirements").

[root@temp-test-01 ssl]# ls -l admin*
-rw-r--r-- 1 root root 1013 Jun 17 17:58 admin.csr
-rw-r--r-- 1 root root  233 Jun 17 17:58 admin-csr.json
-rw------- 1 root root 1679 Jun 17 17:58 admin-key.pem
-rw-r--r-- 1 root root 1411 Jun 17 17:58 admin.pem

[root@temp-test-01 ssl]# mv admin*.pem /opt/kubernetes/ssl/

#4. Set the cluster parameters
[root@temp-test-01 ssl]# kubectl config set-cluster kubernetes \
--certificate-authority=/opt/kubernetes/ssl/ca.pem \
--embed-certs=true \
--server=https://172.16.169.91:6443
Cluster "kubernetes" set.

#5. Set the client credentials
[root@temp-test-01 ssl]# kubectl config set-credentials admin \
--client-certificate=/opt/kubernetes/ssl/admin.pem \
--embed-certs=true \
--client-key=/opt/kubernetes/ssl/admin-key.pem
User "admin" set.

#6. Set the context parameters
[root@temp-test-01 ssl]# kubectl config set-context kubernetes \
--cluster=kubernetes \
--user=admin
Context "kubernetes" created.

#7. Set the default context
[root@temp-test-01 ssl]# kubectl config use-context kubernetes
Switched to context "kubernetes".

#8. Use the kubectl tool
[root@temp-test-01 ssl]# kubectl get cs
NAME                 STATUS    MESSAGE              ERROR
controller-manager   Healthy   ok                   
scheduler            Healthy   ok                   
etcd-0               Healthy   {"health": "true"}   
etcd-1               Healthy   {"health": "true"}   
etcd-2               Healthy   {"health": "true"}  
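
(Optional) A couple of further smoke tests with standard kubectl commands, confirming the API endpoint and the kubeconfig written to ~/.kube/config:
[root@temp-test-01 ssl]# kubectl cluster-info
[root@temp-test-01 ssl]# kubectl config view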

##########Deploy the kubelet##########

#1. Prepare the binaries and copy them from the master to the two node hosts.
[root@temp-test-01 kubernetes]# cd /usr/local/src/k8s-v1.10.1-manual/k8s-v1.10.1/kubernetes/server/bin/
[root@temp-test-01 bin]# cp kubelet kube-proxy /opt/kubernetes/bin/
[root@temp-test-01 bin]# scp kubelet kube-proxy 172.16.169.92:/opt/kubernetes/bin/
kubelet                                                                                              100%  146MB 145.7MB/s   00:01    
kube-proxy                                                                                           100%   49MB  49.0MB/s   00:00    
[root@temp-test-01 bin]# scp kubelet kube-proxy 172.16.169.93:/opt/kubernetes/bin/
kubelet                                                                                              100%  146MB 145.7MB/s   00:01    
kube-proxy                                                                                           100%   49MB  49.0MB/s   00:00    

#2. Create the cluster role binding
[root@temp-test-01 ~]# kubectl create clusterrolebinding kubelet-bootstrap --clusterrole=system:node-bootstrapper --user=kubelet-bootstrap
clusterrolebinding.rbac.authorization.k8s.io "kubelet-bootstrap" created

#3. Create the kubelet bootstrapping kubeconfig file: set the cluster parameters
[root@temp-test-01 ~]# kubectl config set-cluster kubernetes \
--certificate-authority=/opt/kubernetes/ssl/ca.pem \
--embed-certs=true \
--server=https://172.16.169.91:6443 \
--kubeconfig=bootstrap.kubeconfig
Cluster "kubernetes" set.

#4. Set the client credentials (using the bootstrap token created earlier)
[root@temp-test-01 ~]# kubectl config set-credentials kubelet-bootstrap \
--token=ad6d5bb607a186796d8861557df0d17f \
--kubeconfig=bootstrap.kubeconfig
User "kubelet-bootstrap" set.

#5. Set the context parameters
[root@temp-test-01 ~]# kubectl config set-context default \
--cluster=kubernetes \
--user=kubelet-bootstrap \
--kubeconfig=bootstrap.kubeconfig
Context "default" created.

#6. Select the default context
[root@temp-test-01 ~]# kubectl config use-context default --kubeconfig=bootstrap.kubeconfig
Switched to context "default".

[root@temp-test-01 ~]# cp bootstrap.kubeconfig /opt/kubernetes/cfg
[root@temp-test-01 ~]# scp bootstrap.kubeconfig 172.16.169.92:/opt/kubernetes/cfg
bootstrap.kubeconfig                                                                                 100% 2183     2.1KB/s   00:00    
[root@temp-test-01 ~]# scp bootstrap.kubeconfig 172.16.169.93:/opt/kubernetes/cfg
bootstrap.kubeconfig                                                                                 100% 2183     2.1KB/s   00:00    
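
(Optional) To confirm the distributed bootstrap kubeconfig is intact, it can be viewed with kubectl:
[root@temp-test-01 ~]# kubectl config view --kubeconfig=bootstrap.kubeconfig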


############################### The following steps are performed on the node hosts ###############################

#1. Deploy the kubelet: set up CNI support (network plugin)
#node01
[root@temp-test-02 ~]# mkdir -p /etc/cni/net.d
[root@temp-test-02 ~]# vim /etc/cni/net.d/10-default.conf
[root@temp-test-02 ~]# cat /etc/cni/net.d/10-default.conf
{
        "name": "flannel",
        "type": "flannel",
        "delegate": {
            "bridge": "docker0",
            "isDefaultGateway": true,
            "mtu": 1400
        }
}

#node02
[root@temp-test-03 ~]#  mkdir -p /etc/cni/net.d
[root@temp-test-03 ~]# vim /etc/cni/net.d/10-default.conf
[root@temp-test-03 ~]# cat /etc/cni/net.d/10-default.conf
{
        "name": "flannel",
        "type": "flannel",
        "delegate": {
            "bridge": "docker0",
            "isDefaultGateway": true,
            "mtu": 1400
        }
}
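
Note: the kubelet units below point --cni-bin-dir at /opt/kubernetes/bin/cni. If a later step does not already install the CNI plugin binaries there, they can be unpacked from the cni-plugins-amd64-v0.7.1.tgz included in the bundle (a sketch; run on each node after copying the tarball over):
mkdir -p /opt/kubernetes/bin/cni
tar xf cni-plugins-amd64-v0.7.1.tgz -C /opt/kubernetes/bin/cni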

#2. Create the kubelet working directory
#node01
[root@temp-test-02 ~]# mkdir /var/lib/kubelet

#node02
[root@temp-test-03 ~]# mkdir /var/lib/kubelet

#3. Create the kubelet systemd service

#node01
[root@temp-test-02 ~]# vim /usr/lib/systemd/system/kubelet.service
[root@temp-test-02 ~]# cat /usr/lib/systemd/system/kubelet.service
[Unit]
Description=Kubernetes Kubelet
Documentation=https://github.com/GoogleCloudPlatform/kubernetes
After=docker.service
Requires=docker.service

[Service]
WorkingDirectory=/var/lib/kubelet
ExecStart=/opt/kubernetes/bin/kubelet \
  --address=172.16.169.92 \
  --hostname-override=172.16.169.92 \
  --pod-infra-container-image=mirrorgooglecontainers/pause-amd64:3.0 \
  --experimental-bootstrap-kubeconfig=/opt/kubernetes/cfg/bootstrap.kubeconfig \
  --kubeconfig=/opt/kubernetes/cfg/kubelet.kubeconfig \
  --cert-dir=/opt/kubernetes/ssl \
  --network-plugin=cni \
  --cni-conf-dir=/etc/cni/net.d \
  --cni-bin-dir=/opt/kubernetes/bin/cni \
  --cluster-dns=10.1.0.2 \
  --cluster-domain=cluster.local. \
  --hairpin-mode hairpin-veth \
  --allow-privileged=true \
  --fail-swap-on=false \
  --v=2 \
  --logtostderr=false \
  --log-dir=/opt/kubernetes/log
Restart=on-failure
RestartSec=5

#node02
[root@temp-test-03 ~]# vim /usr/lib/systemd/system/kubelet.service
[root@temp-test-03 ~]# cat /usr/lib/systemd/system/kubelet.service
[Unit]
Description=Kubernetes Kubelet
Documentation=https://github.com/GoogleCloudPlatform/kubernetes
After=docker.service
Requires=docker.service

[Service]
WorkingDirectory=/var/lib/kubelet
ExecStart=/opt/kubernetes/bin/kubelet \
  --address=172.16.169.93 \
  --hostname-override=172.16.169.93 \
  --pod-infra-container-image=mirrorgooglecontainers/pause-amd64:3.0 \
  --experimental-bootstrap-kubeconfig=/opt/kubernetes/cfg/bootstrap.kubeconfig \
  --kubeconfig=/opt/kubernetes/cfg/kubelet.kubeconfig \
  --cert-dir=/opt/kubernetes/ssl \
  --network-plugin=cni \
  --cni-conf-dir=/etc/cni/net.d \
  --cni-bin-dir=/opt/kubernetes/bin/cni \
  --cluster-dns=10.1.0.2 \
  --cluster-domain=cluster.local. \
  --hairpin-mode hairpin-veth \
  --allow-privileged=true \
  --fail-swap-on=false \
  --logtostderr=true \
  --v=2 \
  --logtostderr=false \
  --log-dir=/opt/kubernetes/log
Restart=on-failure
RestartSec=5


#4. Start the kubelet

#node01
[root@temp-test-02 ~]# systemctl daemon-reload
[root@temp-test-02 ~]# systemctl enable kubelet
[root@temp-test-02 ~]# systemctl start kubelet

#node02
[root@temp-test-03 ~]# systemctl daemon-reload
[root@temp-test-03 ~]# systemctl enable kubelet
[root@temp-test-03 ~]# systemctl start kubelet

#5. Check the kubelet service status

#node01
[root@temp-test-02 ~]# systemctl status kubelet
● kubelet.service - Kubernetes Kubelet
   Loaded: loaded (/usr/lib/systemd/system/kubelet.service; static; vendor preset: disabled)
   Active: active (running) since Sun 2018-06-17 18:30:51 CST; 6s ago
     Docs: https://github.com/GoogleCloudPlatform/kubernetes
 Main PID: 21103 (kubelet)
   Memory: 13.4M
   CGroup: /system.slice/kubelet.service
           └─21103 /opt/kubernetes/bin/kubelet --address=172.16.169.92 --hostname-override=172.16.169.92 --pod-infra-container-image...

Jun 17 18:30:51 temp-test-02 systemd[1]: Started Kubernetes Kubelet.
Jun 17 18:30:51 temp-test-02 systemd[1]: Starting Kubernetes Kubelet...
Jun 17 18:30:51 temp-test-02 kubelet[21103]: Flag --address has been deprecated, This parameter should be set via the config f...ation.
Jun 17 18:30:51 temp-test-02 kubelet[21103]: Flag --cluster-dns has been deprecated, This parameter should be set via the conf...ation.
Jun 17 18:30:51 temp-test-02 kubelet[21103]: Flag --cluster-domain has been deprecated, This parameter should be set via the c...ation.
Jun 17 18:30:51 temp-test-02 kubelet[21103]: Flag --hairpin-mode has been deprecated, This parameter should be set via the con...ation.
Jun 17 18:30:51 temp-test-02 kubelet[21103]: Flag --allow-privileged has been deprecated, will be removed in a future version
Jun 17 18:30:51 temp-test-02 kubelet[21103]: Flag --fail-swap-on has been deprecated, This parameter should be set via the con...ation.
Hint: Some lines were ellipsized, use -l to show in full.

#node02
[root@temp-test-03 ~]# systemctl status kubelet
● kubelet.service - Kubernetes Kubelet
   Loaded: loaded (/usr/lib/systemd/system/kubelet.service; static; vendor preset: disabled)
   Active: active (running) since Sun 2018-06-17 18:41:32 CST; 5s ago
     Docs: https://github.com/GoogleCloudPlatform/kubernetes
 Main PID: 18908 (kubelet)
   Memory: 14.6M
   CGroup: /system.slice/kubelet.service
           └─18908 /opt/kubernetes/bin/kubelet --address=172.16.169.93 --hostname-override=172.16.169.93 --pod-infra-container-image...

Jun 17 18:41:32 temp-test-03 systemd[1]: Started Kubernetes Kubelet.
Jun 17 18:41:32 temp-test-03 systemd[1]: Starting Kubernetes Kubelet...
Jun 17 18:41:32 temp-test-03 kubelet[18908]: Flag --address has been deprecated, This parameter should be set via the config f...ation.
Jun 17 18:41:32 temp-test-03 kubelet[18908]: Flag --cluster-dns has been deprecated, This parameter should be set via the conf...ation.
Jun 17 18:41:32 temp-test-03 kubelet[18908]: Flag --cluster-domain has been deprecated, This parameter should be set via the c...ation.
Jun 17 18:41:32 temp-test-03 kubelet[18908]: Flag --hairpin-mode has been deprecated, This parameter should be set via the con...ation.
Jun 17 18:41:32 temp-test-03 kubelet[18908]: Flag --allow-privileged has been deprecated, will be removed in a future version
Jun 17 18:41:32 temp-test-03 kubelet[18908]: Flag --fail-swap-on has been deprecated, This parameter should be set via the con...ation.
Hint: Some lines were ellipsized, use -l to show in full.

#6. View the CSR requests. Note: run this on the master.
[root@temp-test-01 ~]# kubectl get csr
NAME                                                   AGE       REQUESTOR           CONDITION
node-csr-V35dgJUzhtkgUn1p7jEmumeqcxXZIA4jtWHgvngWrqM   3m        kubelet-bootstrap   Pending
node-csr-MZTOBa1cs6UNAg2-CypDfTqX3mjGfknin41_xAkRYIs   16s       kubelet-bootstrap   Pending

#7. Approve the kubelet TLS certificate requests
[root@temp-test-01 ~]# kubectl get csr|grep 'Pending' | awk 'NR>0{print $1}'| xargs kubectl certificate approve
certificatesigningrequest.certificates.k8s.io "node-csr-V35dgJUzhtkgUn1p7jEmumeqcxXZIA4jtWHgvngWrqM" approved

[root@temp-test-01 ~]# kubectl get csr
NAME                                                   AGE       REQUESTOR           CONDITION
node-csr-MZTOBa1cs6UNAg2-CypDfTqX3mjGfknin41_xAkRYIs   33s       kubelet-bootstrap   Approved,Issued
node-csr-V35dgJUzhtkgUn1p7jEmumeqcxXZIA4jtWHgvngWrqM   11m       kubelet-bootstrap   Approved,Issued
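
#Requests can also be inspected and approved one at a time instead of in bulk; <csr-name> below is a placeholder for one of the names listed above:
kubectl describe csr <csr-name>
kubectl certificate approve <csr-name>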

#8. After the requests are approved, the nodes show up in the Ready state
[root@temp-test-01 ~]# kubectl get node
NAME            STATUS    ROLES     AGE       VERSION
172.16.169.92   Ready     <none>    7m        v1.10.1
172.16.169.93   Ready     <none>    16s       v1.10.1


########## Deploy Kubernetes Proxy ##########

#1. Configure kube-proxy to use LVS (IPVS)
#node01
[root@temp-test-02 ~]# yum install -y ipvsadm ipset conntrack
#node02
[root@temp-test-03 ~]# yum install -y ipvsadm ipset conntrack
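
#kube-proxy in IPVS mode relies on the ip_vs kernel modules. On most CentOS 7 kernels they load on demand, but they can be loaded explicitly as a precaution (a sketch, run on each node):
for m in ip_vs ip_vs_rr ip_vs_wrr ip_vs_sh nf_conntrack_ipv4; do modprobe $m; done
lsmod | egrep 'ip_vs|nf_conntrack'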

#2. Create the kube-proxy certificate signing request
[root@temp-test-01 ~]# cd /usr/local/src/ssl/
[root@temp-test-01 ssl]# vim kube-proxy-csr.json
[root@temp-test-01 ssl]# cat kube-proxy-csr.json
{
  "CN": "system:kube-proxy",
  "hosts": [],
  "key": {
    "algo": "rsa",
    "size": 2048
  },
  "names": [
    {
      "C": "CN",
      "ST": "Guangzhou",
      "L": "Guangzhou",
      "O": "k8s",
      "OU": "System"
    }
  ]
}

#3. Generate the certificate
[root@temp-test-01 ssl]# cfssl gencert -ca=/opt/kubernetes/ssl/ca.pem \
-ca-key=/opt/kubernetes/ssl/ca-key.pem \
-config=/opt/kubernetes/ssl/ca-config.json \
-profile=kubernetes  kube-proxy-csr.json | cfssljson -bare kube-proxy
2018/06/17 18:59:46 [INFO] generate received request
2018/06/17 18:59:46 [INFO] received CSR
2018/06/17 18:59:46 [INFO] generating key: rsa-2048
2018/06/17 18:59:47 [INFO] encoded CSR
2018/06/17 18:59:47 [INFO] signed certificate with serial number 298287035733857230115897690582410319515133056558
2018/06/17 18:59:47 [WARNING] This certificate lacks a "hosts" field. This makes it unsuitable for
websites. For more information see the Baseline Requirements for the Issuance and Management
of Publicly-Trusted Certificates, v.1.1.6, from the CA/Browser Forum (https://cabforum.org);
specifically, section 10.2.3 ("Information Requirements").

#4. Distribute the certificates to all node machines
[root@temp-test-01 ssl]# cp kube-proxy*.pem /opt/kubernetes/ssl/
[root@temp-test-01 ssl]# scp kube-proxy*.pem 172.16.169.92:/opt/kubernetes/ssl/
kube-proxy-key.pem                                                                                   100% 1679     1.6KB/s   00:00    
kube-proxy.pem                                                                                       100% 1411     1.4KB/s   00:00    
[root@temp-test-01 ssl]# scp kube-proxy*.pem 172.16.169.93:/opt/kubernetes/ssl/
kube-proxy-key.pem                                                                                   100% 1679     1.6KB/s   00:00    
kube-proxy.pem                                                                                       100% 1411     1.4KB/s   00:00    

#5. Create the kube-proxy kubeconfig file
[root@temp-test-01 ssl]# kubectl config set-cluster kubernetes \
--certificate-authority=/opt/kubernetes/ssl/ca.pem \
--embed-certs=true \
--server=https://172.16.169.91:6443 \
--kubeconfig=kube-proxy.kubeconfig
Cluster "kubernetes" set.

[root@temp-test-01 ssl]# kubectl config set-credentials kube-proxy \
--client-certificate=/opt/kubernetes/ssl/kube-proxy.pem \
--client-key=/opt/kubernetes/ssl/kube-proxy-key.pem \
--embed-certs=true \
--kubeconfig=kube-proxy.kubeconfig
User "kube-proxy" set.

[root@temp-test-01 ssl]# kubectl config set-context default \
--cluster=kubernetes \
--user=kube-proxy \
--kubeconfig=kube-proxy.kubeconfig
Context "default" created.

[root@temp-test-01 ssl]# kubectl config use-context default --kubeconfig=kube-proxy.kubeconfig
Switched to context "default".

#6. Distribute the kubeconfig file
[root@temp-test-01 ssl]# cp kube-proxy.kubeconfig /opt/kubernetes/cfg/
[root@temp-test-01 ssl]# scp kube-proxy.kubeconfig 172.16.169.92:/opt/kubernetes/cfg/
kube-proxy.kubeconfig                                                                                100% 6301     6.2KB/s   00:00    
[root@temp-test-01 ssl]# scp kube-proxy.kubeconfig 172.16.169.93:/opt/kubernetes/cfg/
kube-proxy.kubeconfig                                                                                100% 6301     6.2KB/s   00:00    



#7. Create the kube-proxy service unit

##node01
[root@temp-test-02 ~]# mkdir /var/lib/kube-proxy
[root@temp-test-02 ~]# vim /usr/lib/systemd/system/kube-proxy.service
[root@temp-test-02 ~]# cat /usr/lib/systemd/system/kube-proxy.service
[Unit]
Description=Kubernetes Kube-Proxy Server
Documentation=https://github.com/GoogleCloudPlatform/kubernetes
After=network.target

[Service]
WorkingDirectory=/var/lib/kube-proxy
ExecStart=/opt/kubernetes/bin/kube-proxy \
  --bind-address=172.16.169.92 \
  --hostname-override=172.16.169.92 \
  --kubeconfig=/opt/kubernetes/cfg/kube-proxy.kubeconfig \
  --masquerade-all \
  --feature-gates=SupportIPVSProxyMode=true \
  --proxy-mode=ipvs \
  --ipvs-min-sync-period=5s \
  --ipvs-sync-period=5s \
  --ipvs-scheduler=rr \
  --logtostderr=true \
  --v=2 \
  --logtostderr=false \
  --log-dir=/opt/kubernetes/log

Restart=on-failure
RestartSec=5
LimitNOFILE=65536

[Install]
WantedBy=multi-user.target

#Start Kubernetes Proxy
[root@temp-test-02 ~]# systemctl daemon-reload
[root@temp-test-02 ~]# systemctl enable kube-proxy
Created symlink from /etc/systemd/system/multi-user.target.wants/kube-proxy.service to /usr/lib/systemd/system/kube-proxy.service.
[root@temp-test-02 ~]# systemctl start kube-proxy


##node02
[root@temp-test-03 ~]# mkdir /var/lib/kube-proxy
[root@temp-test-03 ~]# vim /usr/lib/systemd/system/kube-proxy.service
[root@temp-test-03 ~]# cat /usr/lib/systemd/system/kube-proxy.service
[Unit]
Description=Kubernetes Kube-Proxy Server
Documentation=https://github.com/GoogleCloudPlatform/kubernetes
After=network.target

[Service]
WorkingDirectory=/var/lib/kube-proxy
ExecStart=/opt/kubernetes/bin/kube-proxy \
  --bind-address=172.16.169.93 \
  --hostname-override=172.16.169.93 \
  --kubeconfig=/opt/kubernetes/cfg/kube-proxy.kubeconfig \
  --masquerade-all \
  --feature-gates=SupportIPVSProxyMode=true \
  --proxy-mode=ipvs \
  --ipvs-min-sync-period=5s \
  --ipvs-sync-period=5s \
  --ipvs-scheduler=rr \
  --logtostderr=true \
  --v=2 \
  --logtostderr=false \
  --log-dir=/opt/kubernetes/log

Restart=on-failure
RestartSec=5
LimitNOFILE=65536

[Install]
WantedBy=multi-user.target

#Start Kubernetes Proxy
[root@temp-test-03 ~]# systemctl daemon-reload
[root@temp-test-03 ~]# systemctl enable kube-proxy
Created symlink from /etc/systemd/system/multi-user.target.wants/kube-proxy.service to /usr/lib/systemd/system/kube-proxy.service.
[root@temp-test-03 ~]# systemctl start kube-proxy

#8. Check the kube-proxy service status
[root@temp-test-02 ~]# systemctl status kube-proxy
● kube-proxy.service - Kubernetes Kube-Proxy Server
   Loaded: loaded (/usr/lib/systemd/system/kube-proxy.service; enabled; vendor preset: disabled)
   Active: active (running) since Sun 2018-06-17 19:13:26 CST; 14s ago
     Docs: https://github.com/GoogleCloudPlatform/kubernetes
 Main PID: 21739 (kube-proxy)
   Memory: 8.4M
   CGroup: /system.slice/kube-proxy.service
           ‣ 21739 /opt/kubernetes/bin/kube-proxy --bind-address=172.16.169.92 --hostname-override=172.16.169.92 --kubeconfig=/opt/k...

Jun 17 19:13:26 temp-test-02 systemd[1]: Started Kubernetes Kube-Proxy Server.
Jun 17 19:13:26 temp-test-02 systemd[1]: Starting Kubernetes Kube-Proxy Server...

[root@temp-test-03 ~]# systemctl status kube-proxy
● kube-proxy.service - Kubernetes Kube-Proxy Server
   Loaded: loaded (/usr/lib/systemd/system/kube-proxy.service; enabled; vendor preset: disabled)
   Active: active (running) since Sun 2018-06-17 19:13:27 CST; 9s ago
     Docs: https://github.com/GoogleCloudPlatform/kubernetes
 Main PID: 19463 (kube-proxy)
   Memory: 8.4M
   CGroup: /system.slice/kube-proxy.service
           ‣ 19463 /opt/kubernetes/bin/kube-proxy --bind-address=172.16.169.93 --hostname-override=172.16.169.93 --kubeconfig=/opt/k...

Jun 17 19:13:27 temp-test-03 systemd[1]: Started Kubernetes Kube-Proxy Server.
Jun 17 19:13:27 temp-test-03 systemd[1]: Starting Kubernetes Kube-Proxy Server...

#9. Check the LVS (IPVS) state
[root@temp-test-02 ~]# ipvsadm -L -n
IP Virtual Server version 1.2.1 (size=4096)
Prot LocalAddress:Port Scheduler Flags
  -> RemoteAddress:Port           Forward Weight ActiveConn InActConn
TCP  10.1.0.1:443 rr persistent 10800
  -> 172.16.169.91:6443           Masq    1      0          0   

  
[root@temp-test-03 ~]# ipvsadm -L -n
IP Virtual Server version 1.2.1 (size=4096)
Prot LocalAddress:Port Scheduler Flags
  -> RemoteAddress:Port           Forward Weight ActiveConn InActConn
TCP  10.1.0.1:443 rr persistent 10800
  -> 172.16.169.91:6443           Masq    1      0          0   
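
#The 10.1.0.1:443 entry above is the kubernetes ClusterIP being forwarded to the API server. Reachability through IPVS can be spot-checked from a node; any HTTP response (version info or a 401/403 error body, since no credentials are sent) confirms the virtual server works:
curl -k https://10.1.0.1:443/version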

If kubelet and kube-proxy have been installed on both test machines, the node status can be checked with the following command:

[root@temp-test-01 ssl]# kubectl get node
NAME            STATUS    ROLES     AGE       VERSION
172.16.169.92   Ready     <none>    38m       v1.10.1
172.16.169.93   Ready     <none>    32m       v1.10.1

############################### Deploy the Flannel network service ###############################

#1. Create the certificate request for Flannel
[root@temp-test-01 ssl]# cd /usr/local/src/ssl
[root@temp-test-01 ssl]# vim flanneld-csr.json
[root@temp-test-01 ssl]# cat flanneld-csr.json
{
  "CN": "flanneld",
  "hosts": [],
  "key": {
    "algo": "rsa",
    "size": 2048
  },
  "names": [
    {
      "C": "CN",
      "ST": "Guangzhou",
      "L": "Guangzhou",
      "O": "k8s",
      "OU": "System"
    }
  ]
}

#2. Generate the certificate
[root@temp-test-01 ssl]# cfssl gencert -ca=/opt/kubernetes/ssl/ca.pem \
-ca-key=/opt/kubernetes/ssl/ca-key.pem \
-config=/opt/kubernetes/ssl/ca-config.json \
-profile=kubernetes flanneld-csr.json | cfssljson -bare flanneld
2018/06/17 19:20:09 [INFO] generate received request
2018/06/17 19:20:09 [INFO] received CSR
2018/06/17 19:20:09 [INFO] generating key: rsa-2048
2018/06/17 19:20:09 [INFO] encoded CSR
2018/06/17 19:20:10 [INFO] signed certificate with serial number 587118205512248630260038303839323774783977614329
2018/06/17 19:20:10 [WARNING] This certificate lacks a "hosts" field. This makes it unsuitable for
websites. For more information see the Baseline Requirements for the Issuance and Management
of Publicly-Trusted Certificates, v.1.1.6, from the CA/Browser Forum (https://cabforum.org);
specifically, section 10.2.3 ("Information Requirements").

#3. Distribute the certificates
[root@temp-test-01 ssl]# cp flanneld*.pem /opt/kubernetes/ssl/
[root@temp-test-01 ssl]# scp flanneld*.pem 172.16.169.92:/opt/kubernetes/ssl/
flanneld-key.pem                                                                                     100% 1679     1.6KB/s   00:00    
flanneld.pem                                                                                         100% 1399     1.4KB/s   00:00    
[root@temp-test-01 ssl]# scp flanneld*.pem 172.16.169.93:/opt/kubernetes/ssl/
flanneld-key.pem                                                                                     100% 1679     1.6KB/s   00:00    
flanneld.pem                                                                                         100% 1399     1.4KB/s   00:00    

#4. Unpack the Flannel package
[root@temp-test-01 ssl]# cd /usr/local/src/k8s-v1.10.1-manual/k8s-v1.10.1/
[root@temp-test-01 k8s-v1.10.1]# ll
total 580292
-rw-r--r--  1 root   root   17108856 Apr 12 17:35 cni-plugins-amd64-v0.7.1.tgz
drwxr-xr-x  3 478493 89939       117 Mar 30 01:49 etcd-v3.2.18-linux-amd64
-rw-r--r--  1 root   root   10562874 Mar 30 01:58 etcd-v3.2.18-linux-amd64.tar.gz
-rw-r--r--  1 root   root    9706487 Jan 24 02:58 flannel-v0.10.0-linux-amd64.tar.gz
drwxr-xr-x 11 root   root       4096 Apr 12 23:17 kubernetes
-rw-r--r--  1 root   root   13344537 Apr 13 01:51 kubernetes-client-linux-amd64.tar.gz
-rw-r--r--  1 root   root  112427817 Apr 13 01:51 kubernetes-node-linux-amd64.tar.gz
-rw-r--r--  1 root   root  428337777 Apr 13 01:51 kubernetes-server-linux-amd64.tar.gz
-rw-r--r--  1 root   root    2716855 Apr 13 01:51 kubernetes.tar.gz

[root@temp-test-01 k8s-v1.10.1]# tar zxf flannel-v0.10.0-linux-amd64.tar.gz										#extract the archive
[root@temp-test-01 k8s-v1.10.1]# ll
total 615784
-rw-r--r--  1 root   root   17108856 Apr 12 17:35 cni-plugins-amd64-v0.7.1.tgz
drwxr-xr-x  3 478493 89939       117 Mar 30 01:49 etcd-v3.2.18-linux-amd64
-rw-r--r--  1 root   root   10562874 Mar 30 01:58 etcd-v3.2.18-linux-amd64.tar.gz
-rwxr-xr-x  1   1001  1001  36327752 Jan 24 02:25 flanneld
-rw-r--r--  1 root   root    9706487 Jan 24 02:58 flannel-v0.10.0-linux-amd64.tar.gz
drwxr-xr-x 11 root   root       4096 Apr 12 23:17 kubernetes
-rw-r--r--  1 root   root   13344537 Apr 13 01:51 kubernetes-client-linux-amd64.tar.gz
-rw-r--r--  1 root   root  112427817 Apr 13 01:51 kubernetes-node-linux-amd64.tar.gz
-rw-r--r--  1 root   root  428337777 Apr 13 01:51 kubernetes-server-linux-amd64.tar.gz
-rw-r--r--  1 root   root    2716855 Apr 13 01:51 kubernetes.tar.gz
-rwxr-xr-x  1   1001  1001      2139 Mar 18  2017 mk-docker-opts.sh
-rw-rw-r--  1   1001  1001      4298 Dec 24 01:28 README.md

##Copy the flanneld binary and helper script into /opt/kubernetes/bin
[root@temp-test-01 k8s-v1.10.1]# cp flanneld mk-docker-opts.sh /opt/kubernetes/bin/
[root@temp-test-01 k8s-v1.10.1]# scp flanneld mk-docker-opts.sh 172.16.169.92:/opt/kubernetes/bin/
flanneld                                                                                             100%   35MB  34.6MB/s   00:01    
mk-docker-opts.sh                                                                                    100% 2139     2.1KB/s   00:00    
[root@temp-test-01 k8s-v1.10.1]# scp flanneld mk-docker-opts.sh 172.16.169.93:/opt/kubernetes/bin/
flanneld                                                                                             100%   35MB  34.6MB/s   00:01    
mk-docker-opts.sh                                                                                    100% 2139     2.1KB/s   00:00    

##Copy the remove-docker0.sh script into /opt/kubernetes/bin
[root@temp-test-01 k8s-v1.10.1]# cd kubernetes/cluster/centos/node/bin/
[root@temp-test-01 bin]# ll
total 8
-rwxr-xr-x 1 root root 2590 Apr 12 23:24 mk-docker-opts.sh
-rwxr-xr-x 1 root root  850 Apr 12 23:24 remove-docker0.sh
[root@temp-test-01 bin]# cp remove-docker0.sh /opt/kubernetes/bin/
[root@temp-test-01 bin]# scp remove-docker0.sh 172.16.169.92:/opt/kubernetes/bin/
remove-docker0.sh                                                                                    100%  850     0.8KB/s   00:00    
[root@temp-test-01 bin]# scp remove-docker0.sh 172.16.169.93:/opt/kubernetes/bin/
remove-docker0.sh                                                                                    100%  850     0.8KB/s   00:00  

#5. Configure Flannel
[root@temp-test-01 ~]# vim /opt/kubernetes/cfg/flannel
[root@temp-test-01 ~]# cat /opt/kubernetes/cfg/flannel
FLANNEL_ETCD="-etcd-endpoints=https://172.16.169.91:2379,https://172.16.169.92:2379,https://172.16.169.93:2379"
FLANNEL_ETCD_KEY="-etcd-prefix=/kubernetes/network"
FLANNEL_ETCD_CAFILE="--etcd-cafile=/opt/kubernetes/ssl/ca.pem"
FLANNEL_ETCD_CERTFILE="--etcd-certfile=/opt/kubernetes/ssl/flanneld.pem"
FLANNEL_ETCD_KEYFILE="--etcd-keyfile=/opt/kubernetes/ssl/flanneld-key.pem"

#Copy the config file to the other nodes
[root@temp-test-01 ~]# scp /opt/kubernetes/cfg/flannel 172.16.169.92:/opt/kubernetes/cfg/
flannel                                                                                              100%  375     0.4KB/s   00:00    
[root@temp-test-01 ~]# scp /opt/kubernetes/cfg/flannel 172.16.169.93:/opt/kubernetes/cfg/
flannel                                                                                              100%  375     0.4KB/s   00:00    

#6. Create the Flannel systemd service
[root@temp-test-01 ~]# vim /usr/lib/systemd/system/flannel.service
[root@temp-test-01 ~]# cat /usr/lib/systemd/system/flannel.service
[Unit]
Description=Flanneld overlay address etcd agent
After=network.target
Before=docker.service

[Service]
EnvironmentFile=-/opt/kubernetes/cfg/flannel
ExecStartPre=/opt/kubernetes/bin/remove-docker0.sh
ExecStart=/opt/kubernetes/bin/flanneld ${FLANNEL_ETCD} ${FLANNEL_ETCD_KEY} ${FLANNEL_ETCD_CAFILE} ${FLANNEL_ETCD_CERTFILE} ${FLANNEL_ETCD_KEYFILE}
ExecStartPost=/opt/kubernetes/bin/mk-docker-opts.sh -d /run/flannel/docker

Type=notify

[Install]
WantedBy=multi-user.target
RequiredBy=docker.service

#Copy the service unit to the other nodes
[root@temp-test-01 ~]# scp /usr/lib/systemd/system/flannel.service 172.16.169.92:/usr/lib/systemd/system/
flannel.service                                                                                      100%  504     0.5KB/s   00:00    
[root@temp-test-01 ~]# scp /usr/lib/systemd/system/flannel.service 172.16.169.93:/usr/lib/systemd/system/
flannel.service                                                                                      100%  504     0.5KB/s   00:00    

############################### Deploy the Flannel CNI integration ###############################

#Locate the CNI plugins package
[root@temp-test-01 ~]# cd /usr/local/src/k8s-v1.10.1-manual/k8s-v1.10.1/
[root@temp-test-01 k8s-v1.10.1]# ll
total 615784
-rw-r--r--  1 root   root   17108856 Apr 12 17:35 cni-plugins-amd64-v0.7.1.tgz
drwxr-xr-x  3 478493 89939       117 Mar 30 01:49 etcd-v3.2.18-linux-amd64
-rw-r--r--  1 root   root   10562874 Mar 30 01:58 etcd-v3.2.18-linux-amd64.tar.gz
-rwxr-xr-x  1   1001  1001  36327752 Jan 24 02:25 flanneld
-rw-r--r--  1 root   root    9706487 Jan 24 02:58 flannel-v0.10.0-linux-amd64.tar.gz
drwxr-xr-x 11 root   root       4096 Apr 12 23:17 kubernetes
-rw-r--r--  1 root   root   13344537 Apr 13 01:51 kubernetes-client-linux-amd64.tar.gz
-rw-r--r--  1 root   root  112427817 Apr 13 01:51 kubernetes-node-linux-amd64.tar.gz
-rw-r--r--  1 root   root  428337777 Apr 13 01:51 kubernetes-server-linux-amd64.tar.gz
-rw-r--r--  1 root   root    2716855 Apr 13 01:51 kubernetes.tar.gz
-rwxr-xr-x  1   1001  1001      2139 Mar 18  2017 mk-docker-opts.sh
-rw-rw-r--  1   1001  1001      4298 Dec 24 01:28 README.md

#Create the CNI plugin directory
[root@temp-test-01 k8s-v1.10.1]# mkdir /opt/kubernetes/bin/cni

#Create the same directory on the other nodes
[root@temp-test-02 ~]# mkdir /opt/kubernetes/bin/cni
[root@temp-test-03 ~]# mkdir /opt/kubernetes/bin/cni

#Extract the archive
[root@temp-test-01 k8s-v1.10.1]# tar zxf cni-plugins-amd64-v0.7.1.tgz -C /opt/kubernetes/bin/cni

#Copy the plugins to the other nodes
[root@temp-test-01 k8s-v1.10.1]# scp -r /opt/kubernetes/bin/cni/* 172.16.169.92:/opt/kubernetes/bin/cni/
bridge                                                                                               100% 3934KB   3.8MB/s   00:00    
dhcp                                                                                                 100% 9993KB   9.8MB/s   00:00    
flannel                                                                                              100% 2789KB   2.7MB/s   00:00    
host-device                                                                                          100% 3054KB   3.0MB/s   00:00    
host-local                                                                                           100% 2966KB   2.9MB/s   00:00    
ipvlan                                                                                               100% 3489KB   3.4MB/s   00:00    
loopback                                                                                             100% 3012KB   2.9MB/s   00:00    
macvlan                                                                                              100% 3529KB   3.5MB/s   00:00    
portmap                                                                                              100% 3468KB   3.4MB/s   00:00    
ptp                                                                                                  100% 3900KB   3.8MB/s   00:00    
sample                                                                                               100% 2580KB   2.5MB/s   00:00    
tuning                                                                                               100% 2783KB   2.7MB/s   00:00    
vlan                                                                                                 100% 3485KB   3.4MB/s   00:00    
[root@temp-test-01 k8s-v1.10.1]# scp -r /opt/kubernetes/bin/cni/* 172.16.169.93:/opt/kubernetes/bin/cni/
bridge                                                                                               100% 3934KB   3.8MB/s   00:00    
dhcp                                                                                                 100% 9993KB   9.8MB/s   00:00    
flannel                                                                                              100% 2789KB   2.7MB/s   00:01    
host-device                                                                                          100% 3054KB   3.0MB/s   00:00    
host-local                                                                                           100% 2966KB   2.9MB/s   00:00    
ipvlan                                                                                               100% 3489KB   3.4MB/s   00:00    
loopback                                                                                             100% 3012KB   2.9MB/s   00:00    
macvlan                                                                                              100% 3529KB   3.5MB/s   00:00    
portmap                                                                                              100% 3468KB   3.4MB/s   00:00    
ptp                                                                                                  100% 3900KB   3.8MB/s   00:00    
sample                                                                                               100% 2580KB   2.5MB/s   00:00    
tuning                                                                                               100% 2783KB   2.7MB/s   00:00    
vlan                                                                                                 100% 3485KB   3.4MB/s   00:00    

#Create the Flannel network key in etcd
[root@temp-test-01 ~]# /opt/kubernetes/bin/etcdctl --ca-file /opt/kubernetes/ssl/ca.pem --cert-file /opt/kubernetes/ssl/flanneld.pem --key-file /opt/kubernetes/ssl/flanneld-key.pem \
--no-sync -C https://172.16.169.91:2379,https://172.16.169.92:2379,https://172.16.169.93:2379 \
mk /kubernetes/network/config '{ "Network": "10.2.0.0/16", "Backend": { "Type": "vxlan", "VNI": 1 }}'
{ "Network": "10.2.0.0/16", "Backend": { "Type": "vxlan", "VNI": 1 }}									#command output
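
#The key can be read back to confirm it was stored, using the same TLS options as above:
/opt/kubernetes/bin/etcdctl --ca-file /opt/kubernetes/ssl/ca.pem --cert-file /opt/kubernetes/ssl/flanneld.pem --key-file /opt/kubernetes/ssl/flanneld-key.pem \
--no-sync -C https://172.16.169.91:2379,https://172.16.169.92:2379,https://172.16.169.93:2379 \
get /kubernetes/network/config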

#Start flannel	#master
[root@temp-test-01 ~]# systemctl daemon-reload
[root@temp-test-01 ~]# systemctl enable flannel
Created symlink from /etc/systemd/system/multi-user.target.wants/flannel.service to /usr/lib/systemd/system/flannel.service.
Created symlink from /etc/systemd/system/docker.service.requires/flannel.service to /usr/lib/systemd/system/flannel.service.
[root@temp-test-01 ~]# chmod +x /opt/kubernetes/bin/*
[root@temp-test-01 ~]# systemctl start flannel

#Start flannel	#node01
[root@temp-test-02 ~]# systemctl daemon-reload
[root@temp-test-02 ~]# systemctl enable flannel
Created symlink from /etc/systemd/system/multi-user.target.wants/flannel.service to /usr/lib/systemd/system/flannel.service.
Created symlink from /etc/systemd/system/docker.service.requires/flannel.service to /usr/lib/systemd/system/flannel.service.
[root@temp-test-02 ~]# chmod +x /opt/kubernetes/bin/*
[root@temp-test-02 ~]# systemctl start flannel

#Start flannel	#node02
[root@temp-test-03 ~]# mkdir /opt/kubernetes/bin/cni
[root@temp-test-03 ~]# systemctl daemon-reload
[root@temp-test-03 ~]# systemctl enable flannel
Created symlink from /etc/systemd/system/multi-user.target.wants/flannel.service to /usr/lib/systemd/system/flannel.service.
Created symlink from /etc/systemd/system/docker.service.requires/flannel.service to /usr/lib/systemd/system/flannel.service.
[root@temp-test-03 ~]# chmod +x /opt/kubernetes/bin/*
[root@temp-test-03 ~]# systemctl start flannel

#Check the service status
[root@temp-test-01 ~]# systemctl status flannel
● flannel.service - Flanneld overlay address etcd agent
   Loaded: loaded (/usr/lib/systemd/system/flannel.service; enabled; vendor preset: disabled)
   Active: active (running) since Sun 2018-06-17 19:36:39 CST; 8s ago
  Process: 22313 ExecStartPost=/opt/kubernetes/bin/mk-docker-opts.sh -d /run/flannel/docker (code=exited, status=0/SUCCESS)
  Process: 22292 ExecStartPre=/opt/kubernetes/bin/remove-docker0.sh (code=exited, status=0/SUCCESS)
 Main PID: 22299 (flanneld)
   Memory: 8.8M
   CGroup: /system.slice/flannel.service
           └─22299 /opt/kubernetes/bin/flanneld -etcd-endpoints=https://172.16.169.91:2379,https://172.16.169.92:2379,https://172.16...
Jun 17 19:36:39 temp-test-01 flanneld[22299]: I0617 19:36:39.507265   22299 main.go:300] Wrote subnet file to /run/flannel/subnet.env
Jun 17 19:36:39 temp-test-01 flanneld[22299]: I0617 19:36:39.507294   22299 main.go:304] Running backend.
Jun 17 19:36:39 temp-test-01 flanneld[22299]: I0617 19:36:39.507567   22299 vxlan_network.go:60] watching for new subnet leases
Jun 17 19:36:39 temp-test-01 flanneld[22299]: I0617 19:36:39.512392   22299 iptables.go:115] Some iptables rules are missing; ... rules
Jun 17 19:36:39 temp-test-01 flanneld[22299]: I0617 19:36:39.512419   22299 iptables.go:137] Deleting iptables rule: -s 10.2.0...ACCEPT
Jun 17 19:36:39 temp-test-01 flanneld[22299]: I0617 19:36:39.513800   22299 main.go:396] Waiting for 22h59m59.990401959s to renew lease
Jun 17 19:36:39 temp-test-01 flanneld[22299]: I0617 19:36:39.519189   22299 iptables.go:137] Deleting iptables rule: -d 10.2.0...ACCEPT
Jun 17 19:36:39 temp-test-01 flanneld[22299]: I0617 19:36:39.523359   22299 iptables.go:125] Adding iptables rule: -s 10.2.0.0...ACCEPT
Jun 17 19:36:39 temp-test-01 flanneld[22299]: I0617 19:36:39.531385   22299 iptables.go:125] Adding iptables rule: -d 10.2.0.0...ACCEPT
Jun 17 19:36:39 temp-test-01 systemd[1]: Started Flanneld overlay address etcd agent.
Hint: Some lines were ellipsized, use -l to show in full.

#Check the service status
[root@temp-test-02 ~]# systemctl status flannel
● flannel.service - Flanneld overlay address etcd agent
   Loaded: loaded (/usr/lib/systemd/system/flannel.service; enabled; vendor preset: disabled)
   Active: active (running) since Sun 2018-06-17 19:38:31 CST; 10s ago
  Process: 29414 ExecStartPost=/opt/kubernetes/bin/mk-docker-opts.sh -d /run/flannel/docker (code=exited, status=0/SUCCESS)
  Process: 29393 ExecStartPre=/opt/kubernetes/bin/remove-docker0.sh (code=exited, status=0/SUCCESS)
 Main PID: 29400 (flanneld)
   Memory: 8.6M
   CGroup: /system.slice/flannel.service
           └─29400 /opt/kubernetes/bin/flanneld -etcd-endpoints=https://172.16.169.91:2379,https://172.16.169.92:2379,https://172.16...
Jun 17 19:38:31 temp-test-02 flanneld[29400]: I0617 19:38:31.097455   29400 main.go:300] Wrote subnet file to /run/flannel/subnet.env
Jun 17 19:38:31 temp-test-02 flanneld[29400]: I0617 19:38:31.097485   29400 main.go:304] Running backend.
Jun 17 19:38:31 temp-test-02 flanneld[29400]: I0617 19:38:31.097874   29400 vxlan_network.go:60] watching for new subnet leases
Jun 17 19:38:31 temp-test-02 flanneld[29400]: I0617 19:38:31.101295   29400 iptables.go:115] Some iptables rules are missing; ... rules
Jun 17 19:38:31 temp-test-02 flanneld[29400]: I0617 19:38:31.101314   29400 iptables.go:137] Deleting iptables rule: -s 10.2.0...ACCEPT
Jun 17 19:38:31 temp-test-02 flanneld[29400]: I0617 19:38:31.102524   29400 iptables.go:137] Deleting iptables rule: -d 10.2.0...ACCEPT
Jun 17 19:38:31 temp-test-02 flanneld[29400]: I0617 19:38:31.105054   29400 iptables.go:125] Adding iptables rule: -s 10.2.0.0...ACCEPT
Jun 17 19:38:31 temp-test-02 flanneld[29400]: I0617 19:38:31.105563   29400 main.go:396] Waiting for 22h59m58.671752689s to renew lease
Jun 17 19:38:31 temp-test-02 flanneld[29400]: I0617 19:38:31.107376   29400 iptables.go:125] Adding iptables rule: -d 10.2.0.0...ACCEPT
Jun 17 19:38:31 temp-test-02 systemd[1]: Started Flanneld overlay address etcd agent.
Hint: Some lines were ellipsized, use -l to show in full.

#Check the service status
[root@temp-test-03 ~]# systemctl status flannel
● flannel.service - Flanneld overlay address etcd agent
   Loaded: loaded (/usr/lib/systemd/system/flannel.service; enabled; vendor preset: disabled)
   Active: active (running) since Sun 2018-06-17 19:38:27 CST; 16s ago
  Process: 27107 ExecStartPost=/opt/kubernetes/bin/mk-docker-opts.sh -d /run/flannel/docker (code=exited, status=0/SUCCESS)
  Process: 27085 ExecStartPre=/opt/kubernetes/bin/remove-docker0.sh (code=exited, status=0/SUCCESS)
 Main PID: 27092 (flanneld)
   Memory: 9.0M
   CGroup: /system.slice/flannel.service
           └─27092 /opt/kubernetes/bin/flanneld -etcd-endpoints=https://172.16.169.91:2379,https://172.16.169.92:2379,https://172.16...
Jun 17 19:38:27 temp-test-03 flanneld[27092]: I0617 19:38:27.538327   27092 main.go:300] Wrote subnet file to /run/flannel/subnet.env
Jun 17 19:38:27 temp-test-03 flanneld[27092]: I0617 19:38:27.538344   27092 main.go:304] Running backend.
Jun 17 19:38:27 temp-test-03 flanneld[27092]: I0617 19:38:27.538709   27092 vxlan_network.go:60] watching for new subnet leases
Jun 17 19:38:27 temp-test-03 flanneld[27092]: I0617 19:38:27.541532   27092 main.go:396] Waiting for 22h59m55.892656717s to renew lease
Jun 17 19:38:27 temp-test-03 flanneld[27092]: I0617 19:38:27.545148   27092 iptables.go:115] Some iptables rules are missing; ... rules
Jun 17 19:38:27 temp-test-03 flanneld[27092]: I0617 19:38:27.545167   27092 iptables.go:137] Deleting iptables rule: -s 10.2.0...ACCEPT
Jun 17 19:38:27 temp-test-03 flanneld[27092]: I0617 19:38:27.556892   27092 iptables.go:137] Deleting iptables rule: -d 10.2.0...ACCEPT
Jun 17 19:38:27 temp-test-03 flanneld[27092]: I0617 19:38:27.558541   27092 iptables.go:125] Adding iptables rule: -s 10.2.0.0...ACCEPT
Jun 17 19:38:27 temp-test-03 flanneld[27092]: I0617 19:38:27.562161   27092 iptables.go:125] Adding iptables rule: -d 10.2.0.0...ACCEPT
Jun 17 19:38:27 temp-test-03 systemd[1]: Started Flanneld overlay address etcd agent.
Hint: Some lines were ellipsized, use -l to show in full.

##########Configure Docker to use the Flannel network
[root@temp-test-01 ~]# vim /usr/lib/systemd/system/docker.service
[root@temp-test-01 ~]# cat /usr/lib/systemd/system/docker.service
[Unit]
Description=Docker Application Container Engine
Documentation=https://docs.docker.com
After=network-online.target firewalld.service flannel.service				#modified
Wants=network-online.target
Requires=flannel.service													#added

[Service]
Type=notify
# the default is not to use systemd for cgroups because the delegate issues still
# exists and systemd currently does not support the cgroup feature set required
# for containers run by docker
EnvironmentFile=-/run/flannel/docker										#added
ExecStart=/usr/bin/dockerd  $DOCKER_OPTS									#modified
ExecReload=/bin/kill -s HUP $MAINPID
# Having non-zero Limit*s causes performance problems due to accounting overhead
# in the kernel. We recommend using cgroups to do container-local accounting.
LimitNOFILE=infinity
LimitNPROC=infinity
LimitCORE=infinity
# Uncomment TasksMax if your systemd version supports it.
# Only systemd 226 and above support this version.
#TasksMax=infinity
TimeoutStartSec=0
# set delegate yes so that systemd does not reset the cgroups of docker containers
Delegate=yes
# kill only the docker process, not all processes in the cgroup
KillMode=process
# restart the docker process if it exits prematurely
Restart=on-failure
StartLimitBurst=3
StartLimitInterval=60s

[Install]
WantedBy=multi-user.target

#Copy the config to the other two nodes
[root@temp-test-01 ~]# scp /usr/lib/systemd/system/docker.service 172.16.169.92:/usr/lib/systemd/system/
docker.service                                                                                       100% 1218     1.2KB/s   00:00    
[root@temp-test-01 ~]# scp /usr/lib/systemd/system/docker.service 172.16.169.93:/usr/lib/systemd/system/
docker.service                                                                                       100% 1218     1.2KB/s   00:00    

#Restart Docker			#every node needs to be restarted
[root@temp-test-01 ~]# systemctl daemon-reload
[root@temp-test-01 ~]# systemctl restart docker

############################### At this point the core Kubernetes components are deployed ###############################

#Check the network interfaces
#Note: verify that the docker0 subnet is contained within the flannel subnet; if the docker0 subnet has not changed, the configuration is wrong.
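
#A quick way to verify the chain on any node: flanneld writes its subnet lease to /run/flannel/subnet.env, mk-docker-opts.sh converts it into DOCKER_OPTS in /run/flannel/docker, and dockerd should be running with the matching --bip option:
cat /run/flannel/subnet.env
cat /run/flannel/docker
ps -ef | grep [d]ockerd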

#master
[root@temp-test-01 ~]# ip addr
1: lo: <LOOPBACK,UP,LOWER_UP> mtu 65536 qdisc noqueue state UNKNOWN 
    link/loopback 00:00:00:00:00:00 brd 00:00:00:00:00:00
    inet 127.0.0.1/8 scope host lo
       valid_lft forever preferred_lft forever
    inet6 ::1/128 scope host 
       valid_lft forever preferred_lft forever
2: eth0: <BROADCAST,MULTICAST,UP,LOWER_UP> mtu 1500 qdisc pfifo_fast state UP qlen 1000
    link/ether 52:54:00:dc:cd:4a brd ff:ff:ff:ff:ff:ff
    inet 172.16.169.91/24 brd 172.16.169.255 scope global eth0
       valid_lft forever preferred_lft forever
    inet6 fe80::5054:ff:fedc:cd4a/64 scope link 
       valid_lft forever preferred_lft forever
4: flannel.1: <BROADCAST,MULTICAST,UP,LOWER_UP> mtu 1450 qdisc noqueue state UNKNOWN 
    link/ether d6:53:1b:30:f0:09 brd ff:ff:ff:ff:ff:ff
    inet 10.2.34.0/32 scope global flannel.1
       valid_lft forever preferred_lft forever
    inet6 fe80::d453:1bff:fe30:f009/64 scope link 
       valid_lft forever preferred_lft forever
5: docker0: <NO-CARRIER,BROADCAST,MULTICAST,UP> mtu 1500 qdisc noqueue state DOWN 
    link/ether 02:42:74:80:aa:22 brd ff:ff:ff:ff:ff:ff
    inet 10.2.34.1/24 brd 10.2.34.255 scope global docker0
       valid_lft forever preferred_lft forever

#node01
[root@temp-test-02 ~]# ip addr
1: lo: <LOOPBACK,UP,LOWER_UP> mtu 65536 qdisc noqueue state UNKNOWN 
    link/loopback 00:00:00:00:00:00 brd 00:00:00:00:00:00
    inet 127.0.0.1/8 scope host lo
       valid_lft forever preferred_lft forever
    inet6 ::1/128 scope host 
       valid_lft forever preferred_lft forever
2: eth0: <BROADCAST,MULTICAST,UP,LOWER_UP> mtu 1500 qdisc pfifo_fast state UP qlen 1000
    link/ether 52:54:00:47:5b:a4 brd ff:ff:ff:ff:ff:ff
    inet 172.16.169.92/24 brd 172.16.169.255 scope global eth0
       valid_lft forever preferred_lft forever
    inet6 fe80::5054:ff:fe47:5ba4/64 scope link 
       valid_lft forever preferred_lft forever
4: dummy0: <BROADCAST,NOARP> mtu 1500 qdisc noop state DOWN 
    link/ether 82:b5:29:11:b3:a8 brd ff:ff:ff:ff:ff:ff
5: kube-ipvs0: <BROADCAST,NOARP> mtu 1500 qdisc noop state DOWN 
    link/ether 8e:be:df:27:3d:aa brd ff:ff:ff:ff:ff:ff
    inet 10.1.0.1/32 brd 10.1.0.1 scope global kube-ipvs0
       valid_lft forever preferred_lft forever
6: flannel.1: <BROADCAST,MULTICAST,UP,LOWER_UP> mtu 1450 qdisc noqueue state UNKNOWN 
    link/ether be:b4:36:53:36:c9 brd ff:ff:ff:ff:ff:ff
    inet 10.2.38.0/32 scope global flannel.1
       valid_lft forever preferred_lft forever
    inet6 fe80::bcb4:36ff:fe53:36c9/64 scope link 
       valid_lft forever preferred_lft forever
7: docker0: <BROADCAST,MULTICAST,UP,LOWER_UP> mtu 1400 qdisc noqueue state UP 
    link/ether 02:42:ed:4b:82:36 brd ff:ff:ff:ff:ff:ff
    inet 10.2.38.1/24 brd 10.2.38.255 scope global docker0
       valid_lft forever preferred_lft forever
    inet6 fe80::42:edff:fe4b:8236/64 scope link 
       valid_lft forever preferred_lft forever

#node02
[root@temp-test-03 ~]# ip addr
1: lo: <LOOPBACK,UP,LOWER_UP> mtu 65536 qdisc noqueue state UNKNOWN 
    link/loopback 00:00:00:00:00:00 brd 00:00:00:00:00:00
    inet 127.0.0.1/8 scope host lo
       valid_lft forever preferred_lft forever
    inet6 ::1/128 scope host 
       valid_lft forever preferred_lft forever
2: eth0: <BROADCAST,MULTICAST,UP,LOWER_UP> mtu 1500 qdisc pfifo_fast state UP qlen 1000
    link/ether 52:54:00:81:79:2c brd ff:ff:ff:ff:ff:ff
    inet 172.16.169.93/24 brd 172.16.169.255 scope global eth0
       valid_lft forever preferred_lft forever
    inet6 fe80::5054:ff:fe81:792c/64 scope link 
       valid_lft forever preferred_lft forever
4: dummy0: <BROADCAST,NOARP> mtu 1500 qdisc noop state DOWN 
    link/ether 52:bc:79:c6:5d:18 brd ff:ff:ff:ff:ff:ff
5: kube-ipvs0: <BROADCAST,NOARP> mtu 1500 qdisc noop state DOWN 
    link/ether 1a:8b:a9:74:99:95 brd ff:ff:ff:ff:ff:ff
    inet 10.1.0.1/32 brd 10.1.0.1 scope global kube-ipvs0
       valid_lft forever preferred_lft forever
6: flannel.1: <BROADCAST,MULTICAST,UP,LOWER_UP> mtu 1450 qdisc noqueue state UNKNOWN 
    link/ether 22:96:b8:d0:07:6b brd ff:ff:ff:ff:ff:ff
    inet 10.2.83.0/32 scope global flannel.1
       valid_lft forever preferred_lft forever
    inet6 fe80::2096:b8ff:fed0:76b/64 scope link 
       valid_lft forever preferred_lft forever
7: docker0: <BROADCAST,MULTICAST,UP,LOWER_UP> mtu 1400 qdisc noqueue state UP 
    link/ether 02:42:b7:cc:82:fa brd ff:ff:ff:ff:ff:ff
    inet 10.2.83.1/24 brd 10.2.83.255 scope global docker0
       valid_lft forever preferred_lft forever
    inet6 fe80::42:b7ff:fecc:82fa/64 scope link 
       valid_lft forever preferred_lft forever



#On the node machines, as more services are created, additional service IPs are added to the kube-ipvs0 interface, for example:
[root@temp-test-02 ~]# ip addr
1: lo: <LOOPBACK,UP,LOWER_UP> mtu 65536 qdisc noqueue state UNKNOWN 
    link/loopback 00:00:00:00:00:00 brd 00:00:00:00:00:00
    inet 127.0.0.1/8 scope host lo
       valid_lft forever preferred_lft forever
    inet6 ::1/128 scope host 
       valid_lft forever preferred_lft forever
2: eth0: <BROADCAST,MULTICAST,UP,LOWER_UP> mtu 1500 qdisc pfifo_fast state UP qlen 1000
    link/ether 52:54:00:47:5b:a4 brd ff:ff:ff:ff:ff:ff
    inet 172.16.169.92/24 brd 172.16.169.255 scope global eth0
       valid_lft forever preferred_lft forever
    inet6 fe80::5054:ff:fe47:5ba4/64 scope link 
       valid_lft forever preferred_lft forever
4: dummy0: <BROADCAST,NOARP> mtu 1500 qdisc noop state DOWN 
    link/ether 82:b5:29:11:b3:a8 brd ff:ff:ff:ff:ff:ff
5: kube-ipvs0: <BROADCAST,NOARP> mtu 1500 qdisc noop state DOWN 
    link/ether 8e:be:df:27:3d:aa brd ff:ff:ff:ff:ff:ff
    inet 10.1.0.1/32 brd 10.1.0.1 scope global kube-ipvs0
       valid_lft forever preferred_lft forever
    inet 10.1.212.20/32 brd 10.1.212.20 scope global kube-ipvs0
       valid_lft forever preferred_lft forever
    inet 10.1.0.2/32 brd 10.1.0.2 scope global kube-ipvs0
       valid_lft forever preferred_lft forever
    inet 10.1.138.21/32 brd 10.1.138.21 scope global kube-ipvs0
       valid_lft forever preferred_lft forever
    inet 10.1.212.103/32 brd 10.1.212.103 scope global kube-ipvs0
       valid_lft forever preferred_lft forever
6: flannel.1: <BROADCAST,MULTICAST,UP,LOWER_UP> mtu 1450 qdisc noqueue state UNKNOWN 
    link/ether be:b4:36:53:36:c9 brd ff:ff:ff:ff:ff:ff
    inet 10.2.38.0/32 scope global flannel.1
       valid_lft forever preferred_lft forever
    inet6 fe80::bcb4:36ff:fe53:36c9/64 scope link 
       valid_lft forever preferred_lft forever
7: docker0: <BROADCAST,MULTICAST,UP,LOWER_UP> mtu 1400 qdisc noqueue state UP 
    link/ether 02:42:ed:4b:82:36 brd ff:ff:ff:ff:ff:ff
    inet 10.2.38.1/24 brd 10.2.38.255 scope global docker0
       valid_lft forever preferred_lft forever
    inet6 fe80::42:edff:fe4b:8236/64 scope link 
       valid_lft forever preferred_lft forever
12: veth737e2781@if3: <BROADCAST,MULTICAST,UP,LOWER_UP> mtu 1400 qdisc noqueue master docker0 state UP 
    link/ether e6:db:b9:91:44:e6 brd ff:ff:ff:ff:ff:ff link-netnsid 1
    inet6 fe80::e4db:b9ff:fe91:44e6/64 scope link 
       valid_lft forever preferred_lft forever
13: veth5a3c3059@if3: <BROADCAST,MULTICAST,UP,LOWER_UP> mtu 1400 qdisc noqueue master docker0 state UP 
    link/ether 16:a8:b7:54:ef:e9 brd ff:ff:ff:ff:ff:ff link-netnsid 2
    inet6 fe80::14a8:b7ff:fe54:efe9/64 scope link 
       valid_lft forever preferred_lft forever
14: veth8b4d8b03@if3: <BROADCAST,MULTICAST,UP,LOWER_UP> mtu 1400 qdisc noqueue master docker0 state UP 
    link/ether 42:8c:79:82:ec:17 brd ff:ff:ff:ff:ff:ff link-netnsid 0
    inet6 fe80::408c:79ff:fe82:ec17/64 scope link 
       valid_lft forever preferred_lft forever
18: vethfe9a9edc@if3: <BROADCAST,MULTICAST,UP,LOWER_UP> mtu 1400 qdisc noqueue master docker0 state UP 
    link/ether 5e:f9:a8:83:1a:01 brd ff:ff:ff:ff:ff:ff link-netnsid 3
    inet6 fe80::5cf9:a8ff:fe83:1a01/64 scope link 
       valid_lft forever preferred_lft forever
22: veth6121b250@if3: <BROADCAST,MULTICAST,UP,LOWER_UP> mtu 1400 qdisc noqueue master docker0 state UP 
    link/ether d2:4e:6d:56:19:3e brd ff:ff:ff:ff:ff:ff link-netnsid 4
    inet6 fe80::d04e:6dff:fe56:193e/64 scope link 
       valid_lft forever preferred_lft forever
23: veth44b7dd01@if3: <BROADCAST,MULTICAST,UP,LOWER_UP> mtu 1400 qdisc noqueue master docker0 state UP 
    link/ether 1a:d4:ed:9c:ac:89 brd ff:ff:ff:ff:ff:ff link-netnsid 5
    inet6 fe80::18d4:edff:fe9c:ac89/64 scope link 
       valid_lft forever preferred_lft forever	   

	   
#Commands for checking the state of the finished deployment (help output)
[root@temp-test-01 ~]# kubectl get --help
#Excerpt of the examples from the help output
Examples:
  # List all pods in ps output format.
  kubectl get pods
  
  # List all pods in ps output format with more information (such as node name).
  kubectl get pods -o wide
  
  # List a single replication controller with specified NAME in ps output format.
  kubectl get replicationcontroller web
  
  # List a single pod in JSON output format.
  kubectl get -o json pod web-pod-13je7
  
  # List a pod identified by type and name specified in "pod.yaml" in JSON output format.
  kubectl get -f pod.yaml -o json
  
  # Return only the phase value of the specified pod.
  kubectl get -o template pod/web-pod-13je7 --template={{.status.phase}}
  
  # List all replication controllers and services together in ps output format.
  kubectl get rc,services
  
  # List one or more resources by their type and names.
  kubectl get rc/web service/frontend pods/web-pod-13je7
  
  # List all resources with different types.
  kubectl get all

##Docker status
[root@temp-test-01 ~]# docker ps
CONTAINER ID        IMAGE               COMMAND             CREATED             STATUS              PORTS               NAMES

[root@temp-test-02 ~]# docker ps
CONTAINER ID        IMAGE               COMMAND             CREATED             STATUS              PORTS               NAMES

[root@temp-test-03 ~]# docker ps
CONTAINER ID        IMAGE               COMMAND             CREATED             STATUS              PORTS               NAMES

##Check node status
[root@temp-test-01 ~]# kubectl get node
NAME            STATUS     ROLES     AGE       VERSION
172.16.169.92   Ready      <none>    19h       v1.10.1
172.16.169.93   Ready      <none>    19h       v1.10.1

[root@temp-test-01 ~]# kubectl get cs
NAME                 STATUS    MESSAGE              ERROR
scheduler            Healthy   ok                   
controller-manager   Healthy   ok                   
etcd-2               Healthy   {"health": "true"}   
etcd-1               Healthy   {"health": "true"}   
etcd-0               Healthy   {"health": "true"}
   
[root@temp-test-01 ~]# kubectl get service
NAME         TYPE        CLUSTER-IP   EXTERNAL-IP   PORT(S)   AGE
kubernetes   ClusterIP   10.1.0.1     <none>        443/TCP   20h

############################### Some personal operation notes below ###############################
The commands themselves are not explained here; for anything unfamiliar, check the official documentation first.

#Official Kubernetes kubectl documentation
#https://kubernetes.io/docs/reference/kubectl/overview/

#Kubernetes Chinese community | Chinese documentation
#http://docs.kubernetes.org.cn/

##Help command
[root@temp-test-01 ~]# kubectl run --help
Create and run a particular image, possibly replicated. 

Creates a deployment or job to manage the created container(s).

Examples:
  # Start a single instance of nginx.
  kubectl run nginx --image=nginx
  
  # Start a single instance of hazelcast and let the container expose port 5701 .
  kubectl run hazelcast --image=hazelcast --port=5701
  
  # Start a single instance of hazelcast and set environment variables "DNS_DOMAIN=cluster" and "POD_NAMESPACE=default"
in the container.
  kubectl run hazelcast --image=hazelcast --env="DNS_DOMAIN=cluster" --env="POD_NAMESPACE=default"
  
  # Start a single instance of hazelcast and set labels "app=hazelcast" and "env=prod" in the container.
  kubectl run hazelcast --image=nginx --labels="app=hazelcast,env=prod"
  
  # Start a replicated instance of nginx.
  kubectl run nginx --image=nginx --replicas=5
  
  # Dry run. Print the corresponding API objects without creating them.
  kubectl run nginx --image=nginx --dry-run
  
  # Start a single instance of nginx, but overload the spec of the deployment with a partial set of values parsed from
JSON.
  kubectl run nginx --image=nginx --overrides='{ "apiVersion": "v1", "spec": { ... } }'
  
  # Start a pod of busybox and keep it in the foreground, don't restart it if it exits.
  kubectl run -i -t busybox --image=busybox --restart=Never
  
  # Start the nginx container using the default command, but use custom arguments (arg1 .. argN) for that command.
  kubectl run nginx --image=nginx -- <arg1> <arg2> ... <argN>
  
  # Start the nginx container using a different command and custom arguments.
  kubectl run nginx --image=nginx --command -- <cmd> <arg1> ... <argN>
  
  # Start the perl container to compute π to 2000 places and print it out.
  kubectl run pi --image=perl --restart=OnFailure -- perl -Mbignum=bpi -wle 'print bpi(2000)'
  
  # Start the cron job to compute π to 2000 places and print it out every 5 minutes.
  kubectl run pi --schedule="0/5 * * * ?" --image=perl --restart=OnFailure -- perl -Mbignum=bpi -wle 'print bpi(2000)'

#Create a deployment with 2 replicas
kubectl run net-test --image=alpine --replicas=2 
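
#Note: the plain alpine image has no long-running foreground process, so once the image is pulled these pods will exit and end up in CrashLoopBackOff (visible further below). A variant that keeps them running could look like this (a sketch, not what was executed in these notes):
kubectl run net-test --image=alpine --replicas=2 -- sleep 360000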

#Check whether the containers are running properly
[root@temp-test-01 ~]# kubectl get pod
NAME                        READY     STATUS              RESTARTS   AGE
net-test-5767cb94df-q7jl2   0/1       ContainerCreating   0          1h
net-test-5767cb94df-svqh6   0/1       ContainerCreating   0          1h

#They are not running yet; check the logs
[root@temp-test-01 ~]# kubectl logs pod/net-test-5767cb94df-q7jl2
Error from server (BadRequest): container "net-test" in pod "net-test-5767cb94df-q7jl2" is waiting to start: ContainerCreating

#Delete the pod
[root@temp-test-01 ~]# kubectl delete pods net-test-5767cb94df-q7jl2 
pod "net-test-5767cb94df-q7jl2" deleted

#Surprisingly, another pod has already been started
[root@temp-test-01 ~]# kubectl get pod
NAME                        READY     STATUS              RESTARTS   AGE
net-test-5767cb94df-q7jl2   0/1       Terminating         0          1h
net-test-5767cb94df-svqh6   0/1       ContainerCreating   0          1h
net-test-5767cb94df-vpvjk   0/1       ContainerCreating   0          5s

#Because the deployment was created with two replicas, when one pod is lost the deployment controller starts a replacement to maintain the replica count
[root@temp-test-01 ~]# kubectl get pod
NAME                        READY     STATUS              RESTARTS   AGE
net-test-5767cb94df-svqh6   0/1       ContainerCreating   0          1h
net-test-5767cb94df-vpvjk   0/1       ContainerCreating   0          1m

[root@temp-test-01 ~]# kubectl get deployments
NAME       DESIRED   CURRENT   UP-TO-DATE   AVAILABLE   AGE
net-test   2         2         2            0           1h

[root@temp-test-01 ~]# kubectl delete deployments net-test
deployment.extensions "net-test" deleted

[root@temp-test-01 ~]# kubectl get deployments
No resources found.

[root@temp-test-01 ~]# kubectl get pod
NAME                        READY     STATUS        RESTARTS   AGE
net-test-5767cb94df-svqh6   0/1       Terminating   0          1h

[root@temp-test-01 ~]# kubectl get pod
No resources found.

#Common error events when creating services, and how to handle them:
#1. Back-off restarting failed docker container
#   Meaning: an abnormally exited Docker container is being restarted.
#   Fix: check whether the process run inside the image exited abnormally; if the image has no continuously running process, add a startup script when creating the service.
#
#2. fit failure on node: Insufficient cpu
#   Meaning: the cluster has insufficient CPU.
#   Fix: the nodes cannot provide enough CPU cores; lower the CPU limit of the service or scale the cluster out.
#
#3. no nodes available to schedule pods
#   Meaning: the cluster has insufficient resources.
#   Fix: there are not enough nodes to host the instances; reduce the number of instances or the CPU limit of the service.
#
#4. pod failed to fit in any node
#   Meaning: no suitable node is available for the instances.
#   Fix: the service has unsuitable resource limits, so no node can host it; adjust the number of instances or the CPU limit of the service.
#
#5. Liveness probe failed:
#   Meaning: the container health check failed.
#   Fix: check whether the process inside the container is healthy and whether the probe port is configured correctly.
#
#6. Error syncing pod, skipping
#   journalctl -xeu kubelet on the node shows this error:
#   Error syncing pod, skipping failed to "StartContainer" for with CrashLoopBackOff: "Back-off 5m0s restarting failed container
#   Meaning: the container process crashed or exited.
#   Fix: check whether the container has a continuously running foreground process and, if so, whether it behaves abnormally. See the Docker image build guide for details.
#
#Source: https://cloud.tencent.com/document/product/457/8187
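
#For any of the errors above, the usual first diagnostic steps are to look at the pod events and the previous container logs (<pod-name> is a placeholder):
kubectl describe pod <pod-name>
kubectl logs <pod-name> --previous
journalctl -u kubelet			#on the node that hosts the pod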

##########Deploy containers using a deployment.yaml file

Sample file:
[root@temp-test-01 ~]# vim nginx-deployment.yaml
apiVersion: apps/v1
kind: Deployment
metadata:
  name: nginx-deployment
  labels:
    app: nginx
spec:
  replicas: 3
  selector:
    matchLabels:
      app: nginx
  template:
    metadata:
      labels:
        app: nginx
    spec:
      containers:
      - name: nginx
        image: nginx:1.10.3
        ports:
        - containerPort: 80

#Create it										#On first deployment, if no suitable image is available locally it has to be pulled from the Docker registry, which takes some time, so the images can also be prepared in advance.
kubectl create -f nginx-deployment.yaml	
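
#Optionally, the image can be pulled on each node in advance so the deployment starts faster:
docker pull nginx:1.10.3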

#Check the status

[root@temp-test-01 deployment]# kubectl get deployment
NAME               DESIRED   CURRENT   UP-TO-DATE   AVAILABLE   AGE
net-test           2         2         2            0           1h
nginx-deployment   3         3         3            3           14m 


[root@temp-test-01 ~]# kubectl get pod -o wide
NAME                                READY     STATUS             RESTARTS   AGE       IP          NODE
net-test-66cb6c7b78-mztlt           0/1       CrashLoopBackOff   137        11h       10.2.38.2   172.16.169.92
net-test-66cb6c7b78-wxmz4           0/1       CrashLoopBackOff   137        11h       10.2.83.2   172.16.169.93
nginx-deployment-75d56bb955-fczl5   1/1       Running            0          11m       10.2.38.3   172.16.169.92
nginx-deployment-75d56bb955-n7kcn   1/1       Running            0          11m       10.2.83.4   172.16.169.93
nginx-deployment-75d56bb955-q5wfq   1/1       Running            0          11m       10.2.83.3   172.16.169.93

#Delete the deployment that is no longer needed
[root@temp-test-01 ~]# kubectl delete deployment net-test
deployment.extensions "net-test" deleted


[root@temp-test-01 ~]# kubectl get pod -o wide
NAME                                READY     STATUS        RESTARTS   AGE       IP          NODE
net-test-66cb6c7b78-mztlt           0/1       Terminating   137        11h       10.2.38.2   172.16.169.92
net-test-66cb6c7b78-wxmz4           0/1       Terminating   137        11h       10.2.83.2   172.16.169.93
nginx-deployment-75d56bb955-fczl5   1/1       Running       0          11m       10.2.38.3   172.16.169.92
nginx-deployment-75d56bb955-n7kcn   1/1       Running       0          11m       10.2.83.4   172.16.169.93
nginx-deployment-75d56bb955-q5wfq   1/1       Running       0          11m       10.2.83.3   172.16.169.93

#Check the status with kubectl describe deployment
kubectl describe deployment nginx-deployment
[root@temp-test-01 ~]# kubectl describe deployment nginx-deployment
Name:                   nginx-deployment
Namespace:              default
CreationTimestamp:      Tue, 19 Jun 2018 11:53:33 +0800
Labels:                 app=nginx
Annotations:            deployment.kubernetes.io/revision=1
Selector:               app=nginx
Replicas:               3 desired | 3 updated | 3 total | 3 available | 0 unavailable
StrategyType:           RollingUpdate
MinReadySeconds:        0
RollingUpdateStrategy:  25% max unavailable, 25% max surge
Pod Template:
  Labels:  app=nginx
  Containers:
   nginx:
    Image:        nginx:1.10.3
    Port:         80/TCP
    Host Port:    0/TCP
    Environment:  <none>
    Mounts:       <none>
  Volumes:        <none>
Conditions:
  Type           Status  Reason
  ----           ------  ------
  Available      True    MinimumReplicasAvailable
  Progressing    True    NewReplicaSetAvailable
OldReplicaSets:  <none>
NewReplicaSet:   nginx-deployment-75d56bb955 (3/3 replicas created)
Events:
  Type    Reason             Age   From                   Message
  ----    ------             ----  ----                   -------
  Normal  ScalingReplicaSet  17m   deployment-controller  Scaled up replica set nginx-deployment-75d56bb955 to 3

#Access test: curl --head <pod IP>
[root@temp-test-01 deployment]# curl --head 10.2.38.3
HTTP/1.1 200 OK
Server: nginx/1.10.3
Date: Tue, 19 Jun 2018 04:05:56 GMT
Content-Type: text/html
Content-Length: 612
Last-Modified: Tue, 31 Jan 2017 15:01:11 GMT
Connection: keep-alive
ETag: "5890a6b7-264"
Accept-Ranges: bytes

[root@temp-test-01 deployment]# curl --head 10.2.83.3
HTTP/1.1 200 OK
Server: nginx/1.10.3
Date: Tue, 19 Jun 2018 04:06:01 GMT
Content-Type: text/html
Content-Length: 612
Last-Modified: Tue, 31 Jan 2017 15:01:11 GMT
Connection: keep-alive
ETag: "5890a6b7-264"
Accept-Ranges: bytes

[root@temp-test-01 deployment]# curl --head 10.2.83.4
HTTP/1.1 200 OK
Server: nginx/1.10.3
Date: Tue, 19 Jun 2018 04:06:03 GMT
Content-Type: text/html
Content-Length: 612
Last-Modified: Tue, 31 Jan 2017 15:01:11 GMT
Connection: keep-alive
ETag: "5890a6b7-264"
Accept-Ranges: bytes

#Replace the image to upgrade to a new version (rolling update)

kubectl set image deployment/nginx-deployment nginx=nginx:1.12.2 --record

[root@temp-test-01 ~]# kubectl get pod -o wide
NAME                                READY     STATUS              RESTARTS   AGE       IP          NODE
nginx-deployment-7498dc98f8-hqp7g   0/1       ContainerCreating   0          6s        <none>      172.16.169.93
nginx-deployment-7498dc98f8-hscq4   1/1       Running             0          37s       10.2.38.4   172.16.169.92
nginx-deployment-75d56bb955-fczl5   1/1       Running             0          18m       10.2.38.3   172.16.169.92
nginx-deployment-75d56bb955-q5wfq   1/1       Running             0          18m       10.2.83.3   172.16.169.93

#Check the status: kubectl describe deployment
[root@temp-test-01 ~]# kubectl describe deployment nginx-deployment
Name:                   nginx-deployment
Namespace:              default
CreationTimestamp:      Tue, 19 Jun 2018 11:53:33 +0800
Labels:                 app=nginx
Annotations:            deployment.kubernetes.io/revision=2
                        kubernetes.io/change-cause=kubectl set image deployment/nginx-deployment nginx=nginx:1.12.2 --record=true
Selector:               app=nginx
Replicas:               3 desired | 2 updated | 4 total | 3 available | 1 unavailable
StrategyType:           RollingUpdate
MinReadySeconds:        0
RollingUpdateStrategy:  25% max unavailable, 25% max surge
Pod Template:
  Labels:  app=nginx
  Containers:
   nginx:
    Image:        nginx:1.12.2
    Port:         80/TCP
    Host Port:    0/TCP
    Environment:  <none>
    Mounts:       <none>
  Volumes:        <none>
Conditions:
  Type           Status  Reason
  ----           ------  ------
  Available      True    MinimumReplicasAvailable
  Progressing    True    ReplicaSetUpdated
OldReplicaSets:  nginx-deployment-75d56bb955 (2/2 replicas created)
NewReplicaSet:   nginx-deployment-7498dc98f8 (2/2 replicas created)
Events:
  Type    Reason             Age   From                   Message
  ----    ------             ----  ----                   -------
  Normal  ScalingReplicaSet  18m   deployment-controller  Scaled up replica set nginx-deployment-75d56bb955 to 3
  Normal  ScalingReplicaSet  47s   deployment-controller  Scaled up replica set nginx-deployment-7498dc98f8 to 1
  Normal  ScalingReplicaSet  16s   deployment-controller  Scaled down replica set nginx-deployment-75d56bb955 to 2
  Normal  ScalingReplicaSet  16s   deployment-controller  Scaled up replica set nginx-deployment-7498dc98f8 to 2


#Judging by the ages, the redeployment has already completed
[root@temp-test-01 ~]# kubectl get pod -o wide
NAME                                READY     STATUS             RESTARTS   AGE       IP          NODE
nginx-deployment-7498dc98f8-hqp7g   1/1       Running            0          5m        10.2.83.5   172.16.169.93
nginx-deployment-7498dc98f8-hscq4   1/1       Running            0          6m        10.2.38.4   172.16.169.92
nginx-deployment-7498dc98f8-jqqz8   1/1       Running            0          4m        10.2.38.5   172.16.169.92  

#From the process above, the upgrade replaces one container at a time until the rollout is complete
#The IP addresses show that a pod gets a new IP every time it is recreated; this is exactly where a Service is needed
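A small sketch, not from the original session: instead of polling kubectl get pod you can also follow a rolling update as it happens; the command below blocks until the rollout has finished or failed.

kubectl rollout status deployment/nginx-deployment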

#View the rollout history
kubectl rollout history deployment/nginx-deployment
[root@temp-test-01 ~]# kubectl rollout history deployment/nginx-deployment
deployments "nginx-deployment"
REVISION  CHANGE-CAUSE
1         <none>
2         kubectl set image deployment/nginx-deployment nginx=nginx:1.12.2 --record=true

#View a specific revision
kubectl rollout history deployment/nginx-deployment --revision=1
[root@temp-test-01 ~]# kubectl rollout history deployment/nginx-deployment --revision=1
deployments "nginx-deployment" with revision #1
Pod Template:
  Labels:	app=nginx
	pod-template-hash=3181266511
  Containers:
   nginx:
    Image:	nginx:1.10.3
    Port:	80/TCP
    Host Port:	0/TCP
    Environment:	<none>
    Mounts:	<none>
  Volumes:	<none>

#View a specific revision
[root@temp-test-01 ~]# kubectl rollout history deployment/nginx-deployment --revision=2
deployments "nginx-deployment" with revision #2
Pod Template:
  Labels:	app=nginx
	pod-template-hash=3054875494
  Annotations:	kubernetes.io/change-cause=kubectl set image deployment/nginx-deployment nginx=nginx:1.12.2 --record=true
  Containers:
   nginx:
    Image:	nginx:1.12.2
    Port:	80/TCP
    Host Port:	0/TCP
    Environment:	<none>
    Mounts:	<none>
  Volumes:	<none>
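A related sketch, not part of the original notes: if several changes (image, resources, labels) should go out as a single revision, the rollout can be paused, edited and resumed.

kubectl rollout pause deployment/nginx-deployment		#new rollouts are held back
kubectl set image deployment/nginx-deployment nginx=nginx:1.12.2	#change is recorded but not rolled out yet
kubectl rollout resume deployment/nginx-deployment		#all queued changes go out as one revision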

#Access test to check the version: curl --head IP    #the pod IPs change after every update
#the nginx version has changed
[root@temp-test-01 ~]# curl --head 10.2.83.5
HTTP/1.1 200 OK
Server: nginx/1.12.2
Date: Tue, 19 Jun 2018 04:22:50 GMT
Content-Type: text/html
Content-Length: 612
Last-Modified: Tue, 11 Jul 2017 13:29:18 GMT
Connection: keep-alive
ETag: "5964d2ae-264"
Accept-Ranges: bytes

[root@temp-test-01 ~]# curl --head 10.2.38.4
HTTP/1.1 200 OK
Server: nginx/1.12.2
Date: Tue, 19 Jun 2018 04:22:56 GMT
Content-Type: text/html
Content-Length: 612
Last-Modified: Tue, 11 Jul 2017 13:29:18 GMT
Connection: keep-alive
ETag: "5964d2ae-264"
Accept-Ranges: bytes

[root@temp-test-01 ~]# curl --head 10.2.38.5
HTTP/1.1 200 OK
Server: nginx/1.12.2
Date: Tue, 19 Jun 2018 04:22:58 GMT
Content-Type: text/html
Content-Length: 612
Last-Modified: Tue, 11 Jul 2017 13:29:18 GMT
Connection: keep-alive
ETag: "5964d2ae-264"
Accept-Ranges: bytes

#Quickly roll back to the previous revision

[root@temp-test-01 ~]# kubectl rollout undo  deployment/nginx-deployment
deployment.apps "nginx-deployment" 

[root@temp-test-01 ~]# kubectl get pod
NAME                                READY     STATUS              RESTARTS   AGE
nginx-deployment-7498dc98f8-hqp7g   1/1       Running             0          10h
nginx-deployment-7498dc98f8-hscq4   1/1       Running             0          10h
nginx-deployment-7498dc98f8-jqqz8   0/1       Terminating         0          9h
nginx-deployment-75d56bb955-965n7   0/1       ContainerCreating   0          2s
nginx-deployment-75d56bb955-rrz8f   1/1       Running             0          4s

[root@temp-test-01 ~]# kubectl get pod -o wide
NAME                                READY     STATUS             RESTARTS   AGE       IP          NODE
nginx-deployment-75d56bb955-965n7   1/1       Running            0          36s       10.2.38.6   172.16.169.92
nginx-deployment-75d56bb955-rpzv5   1/1       Running            0          33s       10.2.83.7   172.16.169.93
nginx-deployment-75d56bb955-rrz8f   1/1       Running            0          38s       10.2.83.6   172.16.169.93

#Access test to check the version: curl --head IP
[root@temp-test-01 ~]# curl --head 10.2.38.6
HTTP/1.1 200 OK
Server: nginx/1.10.3
Date: Tue, 19 Jun 2018 14:13:10 GMT
Content-Type: text/html
Content-Length: 612
Last-Modified: Tue, 31 Jan 2017 15:01:11 GMT
Connection: keep-alive
ETag: "5890a6b7-264"
Accept-Ranges: bytes

[root@temp-test-01 ~]# curl --head 10.2.83.7
HTTP/1.1 200 OK
Server: nginx/1.10.3
Date: Tue, 19 Jun 2018 14:13:17 GMT
Content-Type: text/html
Content-Length: 612
Last-Modified: Tue, 31 Jan 2017 15:01:11 GMT
Connection: keep-alive
ETag: "5890a6b7-264"
Accept-Ranges: bytes

[root@temp-test-01 ~]# curl --head 10.2.83.6
HTTP/1.1 200 OK
Server: nginx/1.10.3
Date: Tue, 19 Jun 2018 14:13:19 GMT
Content-Type: text/html
Content-Length: 612
Last-Modified: Tue, 31 Jan 2017 15:01:11 GMT
Connection: keep-alive
ETag: "5890a6b7-264"
Accept-Ranges: bytes
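kubectl rollout undo goes back to the previous revision by default; to jump to a specific revision from the history shown earlier, --to-revision can be added (a sketch, revision number taken from kubectl rollout history):

kubectl rollout undo deployment/nginx-deployment --to-revision=2		#return to revision 2 (nginx:1.12.2)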

#Create the nginx Service

[root@temp-test-01 ~]# vim nginx-service.yaml
kind: Service
apiVersion: v1
metadata:
  name: nginx-service
spec:
  selector:
    app: nginx
  ports:
  - protocol: TCP
    port: 80
    targetPort: 80

[root@temp-test-01 ~]# kubectl create -f nginx-service.yaml


#View the Service #note the nginx-service ClusterIP

[root@temp-test-01 ~]# kubectl get service
NAME            TYPE        CLUSTER-IP    EXTERNAL-IP   PORT(S)   AGE
kubernetes      ClusterIP   10.1.0.1      <none>        443/TCP   22h
nginx-service   ClusterIP   10.1.212.20   <none>        80/TCP    9h
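A ClusterIP Service such as nginx-service is only reachable from inside the cluster or from nodes running kube-proxy (tested below). To reach it from outside the cluster, one option is a NodePort Service; a sketch, with nginx-nodeport being a hypothetical name not used elsewhere in these notes:

kubectl expose deployment nginx-deployment --name=nginx-nodeport --type=NodePort --port=80 --target-port=80
kubectl get service nginx-nodeport		#note the allocated node port; the service is then reachable at http://NODE_IP:NODE_PORT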



#Access the Service IP for testing; this must be done from a node running kube-proxy: curl --head IP
[root@temp-test-02 ~]# curl --head 10.1.212.20
HTTP/1.1 200 OK
Server: nginx/1.12.2
Date: Tue, 19 Jun 2018 14:11:51 GMT
Content-Type: text/html
Content-Length: 612
Last-Modified: Tue, 11 Jul 2017 13:29:18 GMT
Connection: keep-alive
ETag: "5964d2ae-264"
Accept-Ranges: bytes  

#View the IPVS forwarding rules
[root@temp-test-02 ~]# ipvsadm -L -n
IP Virtual Server version 1.2.1 (size=4096)
Prot LocalAddress:Port Scheduler Flags
  -> RemoteAddress:Port           Forward Weight ActiveConn InActConn
TCP  10.1.0.1:443 rr persistent 10800
  -> 172.16.169.91:6443           Masq    1      0          0         
TCP  10.1.212.20:80 rr
  -> 10.2.38.4:80                 Masq    1      0          0         
  -> 10.2.38.5:80                 Masq    1      0          0         
  -> 10.2.83.5:80                 Masq    1      0          0 

ActiveConn is the number of active connections, i.e. TCP connections in the ESTABLISHED state; InActConn counts all TCP connections in every other state.

Why, then, is the ActiveConn reported by LVS often much higher than the ESTABLISHED count seen with netstat on the real servers? LVS has its own default timeouts, which can be checked with ipvsadm -L --timeout; the defaults are 900 120 300 for TCP, TCPFIN and UDP respectively. In other words, once a TCP connection has passed through LVS, its entry is kept for 15 minutes whether or not the connection is still alive, so a burst of concurrent requests within 15 minutes will push the value up sharply.

Most of the time what we really want from this counter is the real connection count per backend. Knowing how ActiveConn is produced makes that straightforward. For example, with LVS in DR mode in front of a website served by nginx, a healthy request usually completes and the connection closes within about five seconds. Setting ipvsadm --set 5 10 300 keeps TCP entries for only five seconds; a high ActiveConn will then quickly fall until it roughly matches the connection count shown by nginx's status page. Adjust that first value up or down until the real servers' connection count and the LVS ActiveConn agree.
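For reference, the two ipvsadm commands mentioned in the note above (run on a node where ipvsadm is installed; the values are only an example):

ipvsadm -L --timeout		#show the current TCP / TCPFIN / UDP timeouts, default 900 120 300
ipvsadm --set 5 10 300		#keep TCP entries for only 5 seconds so ActiveConn tracks real connections more closely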

#Scale out the deployment replicas
kubectl scale deployment nginx-deployment --replicas=5

#Confirm the current replica count
[root@temp-test-01 ~]# kubectl get deployment
NAME               DESIRED   CURRENT   UP-TO-DATE   AVAILABLE   AGE
nginx-deployment   3         3         3            3           10h

#Scale up the replica count
[root@temp-test-01 ~]# kubectl scale --current-replicas=3 --replicas=5 deployment/nginx-deployment
deployment.extensions "nginx-deployment" scaled

#Check the result
[root@temp-test-01 ~]# kubectl get pod -o wide
NAME                                READY     STATUS    RESTARTS   AGE       IP          NODE
nginx-deployment-75d56bb955-4jlgn   1/1       Running   0          2m        10.2.83.8   172.16.169.93
nginx-deployment-75d56bb955-8n2zj   1/1       Running   0          2m        10.2.38.7   172.16.169.92
nginx-deployment-75d56bb955-965n7   1/1       Running   0          23m       10.2.38.6   172.16.169.92
nginx-deployment-75d56bb955-rpzv5   1/1       Running   0          23m       10.2.83.7   172.16.169.93
nginx-deployment-75d56bb955-rrz8f   1/1       Running   0          23m       10.2.83.6   172.16.169.93

[root@temp-test-01 ~]# kubectl get deployment
NAME               DESIRED   CURRENT   UP-TO-DATE   AVAILABLE   AGE
nginx-deployment   5         5         5            5           10h
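Besides scaling by hand, the replica count can also be driven automatically with a HorizontalPodAutoscaler; a sketch only, since it needs cluster metrics (heapster or metrics-server), which these notes do not deploy:

kubectl autoscale deployment nginx-deployment --min=3 --max=10 --cpu-percent=80
kubectl get hpa		#check the autoscaler status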

############################### CoreDNS service ###############################

Create the CoreDNS service
[root@temp-test-01 ~]# vim coredns.yaml
apiVersion: v1
kind: ServiceAccount
metadata:
  name: coredns
  namespace: kube-system
  labels:
      kubernetes.io/cluster-service: "true"
      addonmanager.kubernetes.io/mode: Reconcile
---
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRole
metadata:
  labels:
    kubernetes.io/bootstrapping: rbac-defaults
    addonmanager.kubernetes.io/mode: Reconcile
  name: system:coredns
rules:
- apiGroups:
  - ""
  resources:
  - endpoints
  - services
  - pods
  - namespaces
  verbs:
  - list
  - watch
---
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRoleBinding
metadata:
  annotations:
    rbac.authorization.kubernetes.io/autoupdate: "true"
  labels:
    kubernetes.io/bootstrapping: rbac-defaults
    addonmanager.kubernetes.io/mode: EnsureExists
  name: system:coredns
roleRef:
  apiGroup: rbac.authorization.k8s.io
  kind: ClusterRole
  name: system:coredns
subjects:
- kind: ServiceAccount
  name: coredns
  namespace: kube-system
---
apiVersion: v1
kind: ConfigMap
metadata:
  name: coredns
  namespace: kube-system
  labels:
      addonmanager.kubernetes.io/mode: EnsureExists
data:
  Corefile: |
    .:53 {
        errors
        health
        kubernetes cluster.local. in-addr.arpa ip6.arpa {
            pods insecure
            upstream
            fallthrough in-addr.arpa ip6.arpa
        }
        prometheus :9153
        proxy . /etc/resolv.conf
        cache 30
    }
---
apiVersion: extensions/v1beta1
kind: Deployment
metadata:
  name: coredns
  namespace: kube-system
  labels:
    k8s-app: coredns
    kubernetes.io/cluster-service: "true"
    addonmanager.kubernetes.io/mode: Reconcile
    kubernetes.io/name: "CoreDNS"
spec:
  replicas: 2
  strategy:
    type: RollingUpdate
    rollingUpdate:
      maxUnavailable: 1
  selector:
    matchLabels:
      k8s-app: coredns
  template:
    metadata:
      labels:
        k8s-app: coredns
    spec:
      serviceAccountName: coredns
      tolerations:
        - key: node-role.kubernetes.io/master
          effect: NoSchedule
        - key: "CriticalAddonsOnly"
          operator: "Exists"
      containers:
      - name: coredns
        image: coredns/coredns:1.0.6
        imagePullPolicy: IfNotPresent
        resources:
          limits:
            memory: 170Mi
          requests:
            cpu: 100m
            memory: 70Mi
        args: [ "-conf", "/etc/coredns/Corefile" ]
        volumeMounts:
        - name: config-volume
          mountPath: /etc/coredns
        ports:
        - containerPort: 53
          name: dns
          protocol: UDP
        - containerPort: 53
          name: dns-tcp
          protocol: TCP
        livenessProbe:
          httpGet:
            path: /health
            port: 8080
            scheme: HTTP
          initialDelaySeconds: 60
          timeoutSeconds: 5
          successThreshold: 1
          failureThreshold: 5
      dnsPolicy: Default
      volumes:
        - name: config-volume
          configMap:
            name: coredns
            items:
            - key: Corefile
              path: Corefile
---
apiVersion: v1
kind: Service
metadata:
  name: coredns
  namespace: kube-system
  labels:
    k8s-app: coredns
    kubernetes.io/cluster-service: "true"
    addonmanager.kubernetes.io/mode: Reconcile
    kubernetes.io/name: "CoreDNS"
spec:
  selector:
    k8s-app: coredns
  clusterIP: 10.1.0.2
  ports:
  - name: dns
    port: 53
    protocol: UDP
  - name: dns-tcp
    port: 53
    protocol: TCP


##Note: the clusterIP was already fixed when the kubelet was deployed on the nodes; if you change it here, the kubelet setting must be changed to match
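For reference, these are the two kubelet flags the clusterIP and domain must agree with (values assumed from this cluster's node setup; adjust if yours differ):

--cluster-dns=10.1.0.2			#must equal the clusterIP of the coredns Service above
--cluster-domain=cluster.local	#must equal the zone used in the Corefile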

[root@temp-test-01 deployment]# kubectl create -f coredns.yaml
serviceaccount "coredns" created
clusterrole.rbac.authorization.k8s.io "system:coredns" created
clusterrolebinding.rbac.authorization.k8s.io "system:coredns" created
configmap "coredns" created
deployment.extensions "coredns" created
service "coredns" created

#Check the status: kubectl get deployment -n kube-system
[root@temp-test-01 deployment]# kubectl get deployment -n kube-system
NAME      DESIRED   CURRENT   UP-TO-DATE   AVAILABLE   AGE
coredns   2         2         2            0           10s

[root@temp-test-01 deployment]# kubectl get deployment -n kube-system
NAME      DESIRED   CURRENT   UP-TO-DATE   AVAILABLE   AGE
coredns   2         2         2            1           1m

[root@temp-test-01 deployment]# kubectl get deployment -n kube-system
NAME      DESIRED   CURRENT   UP-TO-DATE   AVAILABLE   AGE
coredns   2         2         2            2           3m

#View the Service
[root@temp-test-01 deployment]# kubectl get service -n kube-system
NAME      TYPE        CLUSTER-IP   EXTERNAL-IP   PORT(S)         AGE
coredns   ClusterIP   10.1.0.2     <none>        53/UDP,53/TCP   3m

#View the pods; the namespace must be specified
[root@temp-test-01 deployment]# kubectl get pod -n kube-system
NAME                       READY     STATUS    RESTARTS   AGE
coredns-77c989547b-8cnwk   1/1       Running   0          4m
coredns-77c989547b-rh5x2   1/1       Running   0          4m

#View everything in the namespace
[root@temp-test-01 deployment]# kubectl get all -n kube-system
NAME                                        READY     STATUS    RESTARTS   AGE
pod/coredns-77c989547b-8cnwk                1/1       Running   0          11h
pod/coredns-77c989547b-rh5x2                1/1       Running   0          11h
pod/kubernetes-dashboard-66c9d98865-jzlxm   1/1       Running   0          11h

NAME                           TYPE        CLUSTER-IP    EXTERNAL-IP   PORT(S)         AGE
service/coredns                ClusterIP   10.1.0.2      <none>        53/UDP,53/TCP   11h
service/kubernetes-dashboard   NodePort    10.1.138.21   <none>        443:24638/TCP   11h

NAME                                   DESIRED   CURRENT   UP-TO-DATE   AVAILABLE   AGE
deployment.apps/coredns                2         2         2            2           11h
deployment.apps/kubernetes-dashboard   1         1         1            1           11h

NAME                                              DESIRED   CURRENT   READY     AGE
replicaset.apps/coredns-77c989547b                2         2         2         11h
replicaset.apps/kubernetes-dashboard-66c9d98865   1         1         1         11h

[root@temp-test-01 deployment]# kubectl get pod -o wide
NAME                                READY     STATUS    RESTARTS   AGE       IP           NODE
busybox                             1/1       Running   1          1h        10.2.83.12   172.16.169.93
my-nginx-69fbc98fd4-74h6j           1/1       Running   0          5m        10.2.38.14   172.16.169.92
my-nginx-69fbc98fd4-rmlzw           1/1       Running   0          5m        10.2.83.13   172.16.169.93
my2-nginx-558f898b58-46xnk          1/1       Running   0          49s       10.2.83.14   172.16.169.93
my2-nginx-558f898b58-mfmrs          1/1       Running   0          49s       10.2.38.15   172.16.169.92
nginx-deployment-75d56bb955-4jlgn   1/1       Running   0          11h       10.2.83.8    172.16.169.93
nginx-deployment-75d56bb955-8n2zj   1/1       Running   0          11h       10.2.38.7    172.16.169.92
nginx-deployment-75d56bb955-965n7   1/1       Running   0          12h       10.2.38.6    172.16.169.92
nginx-deployment-75d56bb955-rpzv5   1/1       Running   0          12h       10.2.83.7    172.16.169.93
nginx-deployment-75d56bb955-rrz8f   1/1       Running   0          12h       10.2.83.6    172.16.169.93

[root@temp-test-01 deployment]# kubectl get services --all-namespaces
NAMESPACE     NAME                   TYPE        CLUSTER-IP     EXTERNAL-IP   PORT(S)         AGE
default       kubernetes             ClusterIP   10.1.0.1       <none>        443/TCP         3d
default       my-nginx-service       ClusterIP   10.1.241.92    <none>        80/TCP          2d
default       my2-nginx-service      ClusterIP   10.1.209.251   <none>        80/TCP          2d
default       nginx-service          ClusterIP   10.1.212.20    <none>        80/TCP          2d
kube-system   coredns                ClusterIP   10.1.0.2       <none>        53/UDP,53/TCP   2d
kube-system   kubernetes-dashboard   NodePort    10.1.138.21    <none>        443:24638/TCP   2d


#Test DNS resolution
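The tests below are run from inside a pod; to get a shell in one of the nginx pods listed above (pod name copied from the listing, replace with one of yours):

kubectl exec -it my-nginx-69fbc98fd4-74h6j -- /bin/bash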
root@my-nginx-69fbc98fd4-74h6j:/# ping my-nginx-service
PING my-nginx-service.default.svc.cluster.local (10.1.241.92) 56(84) bytes of data.
64 bytes from my-nginx-service.default.svc.cluster.local (10.1.241.92): icmp_seq=1 ttl=64 time=0.086 ms
64 bytes from my-nginx-service.default.svc.cluster.local (10.1.241.92): icmp_seq=2 ttl=64 time=0.072 ms
^C
--- my-nginx-service.default.svc.cluster.local ping statistics ---
2 packets transmitted, 2 received, 0% packet loss, time 1001ms
rtt min/avg/max/mdev = 0.072/0.079/0.086/0.007 ms
root@my-nginx-69fbc98fd4-74h6j:/# ping my-nginx-service.default.svc.cluster.local
PING my-nginx-service.default.svc.cluster.local (10.1.241.92) 56(84) bytes of data.
64 bytes from my-nginx-service.default.svc.cluster.local (10.1.241.92): icmp_seq=1 ttl=64 time=0.077 ms
64 bytes from my-nginx-service.default.svc.cluster.local (10.1.241.92): icmp_seq=2 ttl=64 time=0.084 ms
^C
--- my-nginx-service.default.svc.cluster.local ping statistics ---
2 packets transmitted, 2 received, 0% packet loss, time 1001ms
rtt min/avg/max/mdev = 0.077/0.080/0.084/0.009 ms


root@my2-nginx-558f898b58-46xnk:/# nslookup my-nginx-service
Server:		10.1.0.2
Address:	10.1.0.2#53

Name:	my-nginx-service.default.svc.cluster.local
Address: 10.1.241.92

root@my2-nginx-558f898b58-46xnk:/# nslookup my2-nginx-service
Server:		10.1.0.2
Address:	10.1.0.2#53

Name:	my2-nginx-service.default.svc.cluster.local
Address: 10.1.209.251


root@my2-nginx-558f898b58-46xnk:/# ping my-nginx-service.default.svc.cluster.local
PING my-nginx-service.default.svc.cluster.local (10.1.241.92) 56(84) bytes of data.
64 bytes from my-nginx-service.default.svc.cluster.local (10.1.241.92): icmp_seq=1 ttl=64 time=0.085 ms
64 bytes from my-nginx-service.default.svc.cluster.local (10.1.241.92): icmp_seq=2 ttl=64 time=0.065 ms
64 bytes from my-nginx-service.default.svc.cluster.local (10.1.241.92): icmp_seq=3 ttl=64 time=0.073 ms
64 bytes from my-nginx-service.default.svc.cluster.local (10.1.241.92): icmp_seq=4 ttl=64 time=0.084 ms
^C
--- my-nginx-service.default.svc.cluster.local ping statistics ---
4 packets transmitted, 4 received, 0% packet loss, time 3001ms


#Issue log  #the service cannot be reached from a node via its DNS name
[root@temp-test-03 ~]# curl --head my-nginx-service.default.svc.cluster.local
curl: (6) Could not resolve host: my-nginx-service.default.svc.cluster.local; Name or service not known
[root@temp-test-03 ~]# curl --head 10.1.241.92
curl: (7) Failed connect to 10.1.241.92:80; Connection refused



[root@temp-test-03 ~]# curl --head 10.1.212.20
HTTP/1.1 200 OK
Server: nginx/1.10.3
Date: Fri, 22 Jun 2018 02:34:23 GMT
Content-Type: text/html
Content-Length: 612
Last-Modified: Tue, 31 Jan 2017 15:01:11 GMT
Connection: keep-alive
ETag: "5890a6b7-264"
Accept-Ranges: bytes


[root@temp-test-03 ~]# ping 10.1.209.251
PING 10.1.209.251 (10.1.209.251) 56(84) bytes of data.
64 bytes from 10.1.209.251: icmp_seq=1 ttl=64 time=0.106 ms
64 bytes from 10.1.209.251: icmp_seq=2 ttl=64 time=0.058 ms
^C
--- 10.1.209.251 ping statistics ---
2 packets transmitted, 2 received, 0% packet loss, time 999ms
rtt min/avg/max/mdev = 0.058/0.082/0.106/0.024 ms
[root@temp-test-03 ~]# curl --head 10.1.209.251
curl: (7) Failed connect to 10.1.209.251:80; Connection refused


[root@temp-test-03 ~]# curl --head 10.2.38.14
HTTP/1.1 200 OK
Server: nginx/1.12.2
Date: Fri, 22 Jun 2018 02:36:32 GMT
Content-Type: text/html
Content-Length: 612
Last-Modified: Tue, 11 Jul 2017 13:29:18 GMT
Connection: keep-alive
ETag: "5964d2ae-264"
Accept-Ranges: bytes

[root@temp-test-03 ~]# ipvsadm -nL
Try `ipvsadm -h' or 'ipvsadm --help' for more information.
[root@temp-test-03 ~]# ipvsadm -L -n
IP Virtual Server version 1.2.1 (size=4096)
Prot LocalAddress:Port Scheduler Flags
  -> RemoteAddress:Port           Forward Weight ActiveConn InActConn
TCP  172.16.169.93:24638 rr
  -> 10.2.38.12:8443              Masq    1      0          0         
TCP  10.1.0.1:443 rr persistent 10800
  -> 172.16.169.91:6443           Masq    1      1          0         
TCP  10.1.0.2:53 rr
  -> 10.2.38.8:53                 Masq    1      0          0         
  -> 10.2.83.9:53                 Masq    1      0          0         
TCP  10.1.138.21:443 rr
  -> 10.2.38.12:8443              Masq    1      0          0         
TCP  10.1.209.251:80 rr
TCP  10.1.212.20:80 rr
  -> 10.2.38.6:80                 Masq    1      0          0         
  -> 10.2.38.7:80                 Masq    1      0          0         
  -> 10.2.83.6:80                 Masq    1      0          0         
  -> 10.2.83.7:80                 Masq    1      0          0         
  -> 10.2.83.8:80                 Masq    1      0          0         
TCP  10.1.241.92:80 rr
TCP  10.2.83.0:24638 rr
  -> 10.2.38.12:8443              Masq    1      0          0         
TCP  10.2.83.1:24638 rr
  -> 10.2.38.12:8443              Masq    1      0          0         
TCP  127.0.0.1:24638 rr
  -> 10.2.38.12:8443              Masq    1      0          0         
UDP  10.1.0.2:53 rr
  -> 10.2.38.8:53                 Masq    1      0          0         
  -> 10.2.83.9:53                 Masq    1      0          1     
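A likely reading of the output above, offered as a hedge rather than a conclusion: the node's own /etc/resolv.conf does not point at CoreDNS, so *.svc.cluster.local names only resolve inside pods; and the IPVS entries for 10.1.241.92:80 and 10.1.209.251:80 have no real servers behind them, which is why connections are refused. Two checks that may help (dig comes from the bind-utils package):

dig @10.1.0.2 my-nginx-service.default.svc.cluster.local +short		#query CoreDNS directly from the node
kubectl get endpoints my-nginx-service my2-nginx-service			#empty ENDPOINTS would mean the selector/targetPort matches no ready pod, or kube-proxy has not synced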

############################### Deploy the dashboard ###############################

admin-user-sa-rbac.yaml kubernetes-dashboard.yaml ui-admin-rbac.yaml ui-read-rbac.yaml

kubectl create -f .

#ImagePullBackOff may appear
This usually means the image is hosted on a registry that cannot be reached directly, so it cannot be pulled; pull it by another route and load it onto the nodes locally, as sketched below
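One workaround, sketched here with a placeholder image name and tag (take the real ones from kubernetes-dashboard.yaml): pull the image on a machine that can reach the registry, then copy and load it on every node.

docker pull k8s.gcr.io/kubernetes-dashboard-amd64:v1.8.3		#on a machine with access to the registry
docker save -o dashboard.tar k8s.gcr.io/kubernetes-dashboard-amd64:v1.8.3
docker load -i dashboard.tar		#on each node, after copying dashboard.tar over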


#View the logs #-n specifies the namespace	#kubectl logs pod/NAME_ID -n kube-system

[root@temp-test-01 dashboard]# kubectl get pod -n kube-system
NAME                                    READY     STATUS    RESTARTS   AGE
coredns-77c989547b-8cnwk                1/1       Running   0          1h
coredns-77c989547b-rh5x2                1/1       Running   0          1h
kubernetes-dashboard-66c9d98865-jzlxm   1/1       Running   0          33m


#View the dashboard

[root@temp-test-01 dashboard]# kubectl get service -n kube-system
NAME                   TYPE        CLUSTER-IP    EXTERNAL-IP   PORT(S)         AGE
coredns                ClusterIP   10.1.0.2      <none>        53/UDP,53/TCP   1h
kubernetes-dashboard   NodePort    10.1.138.21   <none>        443:24638/TCP   34m

[root@temp-test-01 dashboard]# kubectl get pod -o wide -n kube-system
NAME                                    READY     STATUS    RESTARTS   AGE       IP           NODE
coredns-77c989547b-8cnwk                1/1       Running   0          1h        10.2.38.8    172.16.169.92
coredns-77c989547b-rh5x2                1/1       Running   0          1h        10.2.83.9    172.16.169.93
kubernetes-dashboard-66c9d98865-jzlxm   1/1       Running   0          35m       10.2.38.12   172.16.169.92

[root@temp-test-02 ~]# ipvsadm -L -n
IP Virtual Server version 1.2.1 (size=4096)
Prot LocalAddress:Port Scheduler Flags
  -> RemoteAddress:Port           Forward Weight ActiveConn InActConn
TCP  172.16.169.92:24638 rr
  -> 10.2.38.12:8443              Masq    1      0          0         
TCP  10.1.0.1:443 rr persistent 10800
  -> 172.16.169.91:6443           Masq    1      2          0         
TCP  10.1.0.2:53 rr
  -> 10.2.38.8:53                 Masq    1      0          0         
  -> 10.2.83.9:53                 Masq    1      0          0         
TCP  10.1.138.21:443 rr
  -> 10.2.38.12:8443              Masq    1      0          0         
TCP  10.1.212.20:80 rr
  -> 10.2.38.6:80                 Masq    1      0          0         
  -> 10.2.38.7:80                 Masq    1      0          0         
  -> 10.2.83.6:80                 Masq    1      0          0         
  -> 10.2.83.7:80                 Masq    1      0          0         
  -> 10.2.83.8:80                 Masq    1      0          0         
TCP  10.2.38.0:24638 rr
  -> 10.2.38.12:8443              Masq    1      0          0         
TCP  10.2.38.1:24638 rr
  -> 10.2.38.12:8443              Masq    1      0          0         
TCP  127.0.0.1:24638 rr
  -> 10.2.38.12:8443              Masq    1      0          0         
UDP  10.1.0.2:53 rr
  -> 10.2.38.8:53                 Masq    1      0          0         
  -> 10.2.83.9:53                 Masq    1      0          0 

  
[root@temp-test-03 ~]# ipvsadm -L -n
IP Virtual Server version 1.2.1 (size=4096)
Prot LocalAddress:Port Scheduler Flags
  -> RemoteAddress:Port           Forward Weight ActiveConn InActConn
TCP  172.16.169.93:24638 rr
  -> 10.2.38.12:8443              Masq    1      0          0         
TCP  10.1.0.1:443 rr persistent 10800
  -> 172.16.169.91:6443           Masq    1      1          0         
TCP  10.1.0.2:53 rr
  -> 10.2.38.8:53                 Masq    1      0          0         
  -> 10.2.83.9:53                 Masq    1      0          0         
TCP  10.1.138.21:443 rr
  -> 10.2.38.12:8443              Masq    1      0          0         
TCP  10.1.212.20:80 rr
  -> 10.2.38.6:80                 Masq    1      0          0         
  -> 10.2.38.7:80                 Masq    1      0          0         
  -> 10.2.83.6:80                 Masq    1      0          0         
  -> 10.2.83.7:80                 Masq    1      0          0         
  -> 10.2.83.8:80                 Masq    1      0          0         
TCP  10.2.83.0:24638 rr
  -> 10.2.38.12:8443              Masq    1      0          0         
TCP  10.2.83.1:24638 rr
  -> 10.2.38.12:8443              Masq    1      0          0         
TCP  127.0.0.1:24638 rr
  -> 10.2.38.12:8443              Masq    1      0          0         
UDP  10.1.0.2:53 rr
  -> 10.2.38.8:53                 Masq    1      0          2         
  -> 10.2.83.9:53                 Masq    1      0          2   
  
Open it in a browser (Firefox is recommended)
https://kube-proxy_IP:PORT

https://172.16.169.92:24638

Get the login token
kubectl -n kube-system describe secret $(kubectl -n kube-system get secret | grep admin-user | awk '{print $1}')

#A possible problem is that the image cannot be pulled

Setting the cluster up is only scratching the surface; understanding how each add-on works, and above all how to write the YAML manifests, is what really matters.