Canary and Blue-Green Upgrades
Canary: upgrade one or a few machines first, verify everything works, then upgrade the rest. Pros: small, controllable blast radius. Cons: you need to build extra automation for the staged rollout.
Blue-green: two groups of machines; blue runs the current v1, green runs the upgraded v2. Traffic is switched to v2 at the load balancer to complete the upgrade. Pros: the cutover is fast. Cons: a bad release hits all users at once.
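For contrast, here is a minimal blue-green cutover sketch in Kubernetes terms, assuming (unlike the canary manifests below) the Service's selector also pins the version label:

# Blue (v1) and green (v2) Deployments both run; the Service currently
# selects {app: my-app, version: v1.0.0}. Cut all traffic over in one step
# by re-pointing the selector at the green pods:
kubectl patch service my-app -p '{"spec":{"selector":{"app":"my-app","version":"v2.0.0"}}}'
# Rolling back is just as fast: patch the selector back to v1.0.0.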
Scenario
Below is a canary-release implementation that leverages the ReplicaSet and Pod mechanics underneath a Deployment.
The idea:
- Run 10 replicas of v1
- Deploy 1 replica of v2 (roughly 10% of capacity)
- Wait a while and confirm v2 is serving correctly
- Scale v2 up to 10 replicas
- Wait until all v2 pods are Ready (one way to do this is sketched below)
- Stop all v1 replicas
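A simple way to implement the "wait until Ready" step is to block on the Deployment's rollout (a sketch; my-app-v2 is the Deployment name used in the manifests below):

kubectl rollout status deploy/my-app-v2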
v1 and v2 manifests. Note that the Service's selector matches only app: my-app, not the version label, so it load-balances across the pods of both Deployments; this is what makes the traffic split work.
app-v1.yaml
apiVersion: v1
kind: Service
metadata:
  name: my-app
  labels:
    app: my-app
spec:
  type: NodePort
  ports:
  - name: http
    port: 80
    targetPort: http
  selector:
    app: my-app
---
apiVersion: apps/v1
kind: Deployment
metadata:
  name: my-app-v1
  labels:
    app: my-app
spec:
  replicas: 10
  selector:
    matchLabels:
      app: my-app
      version: v1.0.0
  template:
    metadata:
      labels:
        app: my-app
        version: v1.0.0
      annotations:
        prometheus.io/scrape: "true"
        prometheus.io/port: "9101"
    spec:
      containers:
      - name: my-app
        image: containersol/k8s-deployment-strategies
        ports:
        - name: http
          containerPort: 8080
        - name: probe
          containerPort: 8086
        env:
        - name: VERSION
          value: v1.0.0
        livenessProbe:
          httpGet:
            path: /live
            port: probe
          initialDelaySeconds: 5
          periodSeconds: 5
        readinessProbe:
          httpGet:
            path: /ready
            port: probe
          periodSeconds: 5
app-v2.yaml
apiVersion: apps/v1
kind: Deployment
metadata:
  name: my-app-v2
  labels:
    app: my-app
spec:
  replicas: 1
  selector:
    matchLabels:
      app: my-app
      version: v2.0.0
  template:
    metadata:
      labels:
        app: my-app
        version: v2.0.0
      annotations:
        prometheus.io/scrape: "true"
        prometheus.io/port: "9101"
    spec:
      containers:
      - name: my-app
        image: containersol/k8s-deployment-strategies
        ports:
        - name: http
          containerPort: 8080
        - name: probe
          containerPort: 8086
        env:
        - name: VERSION
          value: v2.0.0
        livenessProbe:
          httpGet:
            path: /live
            port: probe
          initialDelaySeconds: 5
          periodSeconds: 5
        readinessProbe:
          httpGet:
            path: /ready
            port: probe
          periodSeconds: 5
Showtime
# Deploy the v1 Deployment (k is an alias for kubectl)
[root@master k8s]# k apply -f app-v1.yaml
service/my-app created
deployment.apps/my-app-v1 created
# Verify the Deployment is serving (10.101.252.35 is the Service's cluster IP)
[root@master k8s]# curl 10.101.252.35:80
Host: my-app-v1-599bdf7545-qjdvw, Version: v1.0.0
# In a second terminal, watch the pod count change
kubectl get po -w
# Deploy v2 with 1 replica
[root@master k8s]# k apply -f app-v2.yaml
deployment.apps/my-app-v2 created
# Scale v1 down to 9 replicas; the two versions now run 9+1
[root@master k8s]# kubectl scale --replicas=9 deploy my-app-v1
deployment.apps/my-app-v1 scaled
# Simulate client traffic
# The observed v1:v2 ratio won't be exactly 9:1; it depends on the Service's
# load-balancing (kube-proxy) behavior, which we won't dig into here
[root@master k8s]# service=10.101.252.35:80
[root@master k8s]# while sleep 0.1; do curl "$service"; done
Host: my-app-v1-599bdf7545-cdjq9, Version: v1.0.0
Host: my-app-v1-599bdf7545-p57x9, Version: v1.0.0
Host: my-app-v1-599bdf7545-x9p97, Version: v1.0.0
Host: my-app-v1-599bdf7545-sr2cn, Version: v1.0.0
Host: my-app-v1-599bdf7545-cdjq9, Version: v1.0.0
Host: my-app-v2-7cdf7685d9-gzb42, Version: v2.0.0
Host: my-app-v1-599bdf7545-cdjq9, Version: v1.0.0
Host: my-app-v2-7cdf7685d9-gzb42, Version: v2.0.0
...
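To eyeball the actual split, you can count responses per version over a fixed number of requests (a sketch reusing the $service variable from above):

for i in $(seq 1 200); do curl -s "$service"; done | grep -o 'Version: v[0-9.]*' | sort | uniq -c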
# At this point v2 can be considered healthy, so scale it to 10 replicas
[root@master k8s]# kubectl scale --replicas=10 deploy my-app-v2
deployment.apps/my-app-v2 scaled
# Once v2 has finished scaling, delete the v1 Deployment
[root@master k8s]# kubectl delete deploy my-app-v1
deployment.apps "my-app-v1" deleted
# Simulate traffic again; the responses are as expected
[root@master k8s]# while sleep 0.1; do curl "$service"; done
Host: my-app-v2-7cdf7685d9-nnr6d, Version: v2.0.0
Host: my-app-v2-7cdf7685d9-gzb42, Version: v2.0.0
Host: my-app-v2-7cdf7685d9-q75cg, Version: v2.0.0
Host: my-app-v2-7cdf7685d9-df94h, Version: v2.0.0
Host: my-app-v2-7cdf7685d9-df94h, Version: v2.0.0
...
# Clean up
$ kubectl delete all -l app=my-app