k8s网关kong

648 阅读10分钟

一.kong安装

1.创建名称空间

kubectl create ns kong

2.安装postgresql

2.1 postgresql yaml

---
# Secret holding the PostgreSQL superuser password.
# "WjJDZzN1RjFRNg==" is base64 for "Z2Cg3uF1Q6".
apiVersion: v1
kind: Secret
metadata:
  name: postgresql
  namespace: kong
  labels:
    app.kubernetes.io/name: postgresql
    app.kubernetes.io/instance: postgresql
type: Opaque
data:
  postgresql-password: "WjJDZzN1RjFRNg=="
---
# ClusterIP service fronting the primary PostgreSQL pod.
apiVersion: v1
kind: Service
metadata:
  name: postgresql
  namespace: kong
  labels:
    app.kubernetes.io/name: postgresql
    app.kubernetes.io/instance: postgresql
spec:
  type: ClusterIP
  ports:
    - name: tcp-postgresql
      port: 5432
      targetPort: tcp-postgresql
  selector:
    app.kubernetes.io/name: postgresql
    app.kubernetes.io/instance: postgresql
    role: primary
---
# Single-replica PostgreSQL primary (Bitnami image), persisted via a
# volumeClaimTemplate.
apiVersion: apps/v1
kind: StatefulSet
metadata:
  name: postgresql
  namespace: kong
  labels:
    app.kubernetes.io/name: postgresql
    app.kubernetes.io/instance: postgresql
    app.kubernetes.io/component: primary
spec:
  serviceName: postgresql
  replicas: 1
  updateStrategy:
    type: RollingUpdate
  selector:
    matchLabels:
      app.kubernetes.io/name: postgresql
      app.kubernetes.io/instance: postgresql
      role: primary
  template:
    metadata:
      name: postgresql
      labels:
        app.kubernetes.io/name: postgresql
        app.kubernetes.io/instance: postgresql
        role: primary
        app.kubernetes.io/component: primary
    spec:
      affinity:
        # Soft anti-affinity: prefer spreading primaries across nodes.
        podAntiAffinity:
          preferredDuringSchedulingIgnoredDuringExecution:
            - podAffinityTerm:
                labelSelector:
                  matchLabels:
                    app.kubernetes.io/name: postgresql
                    app.kubernetes.io/instance: postgresql
                    app.kubernetes.io/component: primary
                namespaces:
                  - "kong"
                topologyKey: kubernetes.io/hostname
              weight: 1
      securityContext:
        fsGroup: 1001
      containers:
        - name: postgresql
          image: docker.io/bitnami/postgresql:11.12.0-debian-10-r20
          imagePullPolicy: "IfNotPresent"
          resources:
            requests:
              cpu: 250m
              memory: 256Mi
            limits:
              cpu: 1000m
              memory: 512Mi
          securityContext:
            runAsUser: 1001
          env:
            - name: BITNAMI_DEBUG
              value: "false"
            - name: POSTGRESQL_PORT_NUMBER
              value: "5432"
            - name: POSTGRESQL_VOLUME_DIR
              value: "/bitnami/postgresql"
            - name: PGDATA
              value: "/bitnami/postgresql/data"
            - name: POSTGRES_USER
              value: "postgres"
            # The password comes from the Secret above; do not also set a
            # literal `value` here — `value` and `valueFrom` are mutually
            # exclusive in the EnvVar API.
            - name: POSTGRES_PASSWORD
              valueFrom:
                secretKeyRef:
                  name: postgresql
                  key: postgresql-password
            - name: POSTGRESQL_ENABLE_LDAP
              value: "no"
            - name: POSTGRESQL_ENABLE_TLS
              value: "no"
            - name: POSTGRESQL_LOG_HOSTNAME
              value: "false"
            - name: POSTGRESQL_LOG_CONNECTIONS
              value: "false"
            - name: POSTGRESQL_LOG_DISCONNECTIONS
              value: "false"
            - name: POSTGRESQL_PGAUDIT_LOG_CATALOG
              value: "off"
            - name: POSTGRESQL_CLIENT_MIN_MESSAGES
              value: "error"
            - name: POSTGRESQL_SHARED_PRELOAD_LIBRARIES
              value: "pgaudit"
          ports:
            - name: tcp-postgresql
              containerPort: 5432
          livenessProbe:
            exec:
              command:
                - /bin/sh
                - -c
                - exec pg_isready -U "postgres" -h 127.0.0.1 -p 5432
            initialDelaySeconds: 30
            periodSeconds: 10
            timeoutSeconds: 5
            successThreshold: 1
            failureThreshold: 6
          readinessProbe:
            exec:
              command:
                - /bin/sh
                - -c
                - -e
                - |
                  exec pg_isready -U "postgres" -h 127.0.0.1 -p 5432
                  [ -f /opt/bitnami/postgresql/tmp/.initialized ] || [ -f /bitnami/postgresql/.initialized ]
            initialDelaySeconds: 5
            periodSeconds: 10
            timeoutSeconds: 5
            successThreshold: 1
            failureThreshold: 6
          volumeMounts:
            # Larger shared memory than the default 64Mi docker /dev/shm.
            - name: dshm
              mountPath: /dev/shm
            - name: data
              mountPath: /bitnami/postgresql
            # Sync container time zone with the node.
            - name: host-time
              mountPath: /etc/localtime
              readOnly: true
      volumes:
        - name: dshm
          emptyDir:
            medium: Memory
        - name: host-time
          hostPath:
            path: /etc/localtime
  volumeClaimTemplates:
    - metadata:
        name: data
      spec:
        accessModes:
          - "ReadWriteOnce"
        resources:
          requests:
            storage: "10Gi"

2.2 导入

kubectl  apply -f postgresql-sts.yaml 

2.3 查看

[root@master ~]# kubectl get pod -n kong
NAME           READY   STATUS    RESTARTS   AGE
postgresql-0   1/1     Running   0          43s

2.4 进入postgresql容器中创建kong和konga数据库

###数据库密码:Z2Cg3uF1Q6
[root@master ~]# kubectl exec -it -n kong postgresql-0 bash
kubectl exec [POD] [COMMAND] is DEPRECATED and will be removed in a future version. Use kubectl exec [POD] -- [COMMAND] instead.
I have no name!@postgresql-0:/$ psql -h 127.0.0.1   -U  postgres
Password for user postgres:
psql (11.12)
Type "help" for help.

postgres=# create database kong ;
CREATE DATABASE
postgres=# create database konga ;
CREATE DATABASE
postgres=#

3.helm安装kong

3.1 helm安装kong

helm repo add kong https://charts.konghq.com
helm repo update
helm search repo kong/kong
helm pull kong/kong
tar -zxvf kong-*.tgz
cd kong

#修改配置
vim values.yaml

# 打开http-admin端口
admin.enabled: false  修改为 admin.enabled: true
admin.http.enabled: false  修改为 admin.http.enabled: true

#使用外部postgresql
[root@master kong]# grep -B24 'pg_ssl_verify: false'  values.yaml
env:
  database: "off"
  # the chart uses the traditional router (for Kong 3.x+) because the ingress
  # controller generates traditional routes. if you do not use the controller,
  # you may set this to "traditional_compatible" or "expressions" to use the new
  # DSL-based router
  router_flavor: "traditional"
  nginx_worker_processes: "2"
  proxy_access_log: /dev/stdout
  admin_access_log: /dev/stdout
  admin_gui_access_log: /dev/stdout
  portal_api_access_log: /dev/stdout
  proxy_error_log: /dev/stderr
  admin_error_log: /dev/stderr
  admin_gui_error_log: /dev/stderr
  portal_api_error_log: /dev/stderr
  prefix: /kong_prefix/
  database: "postgres"  #使用postgresql数据库
  pg_host: "postgresql.kong.svc.cluster.local"
  pg_port: "5432"
  pg_user: "postgres"
  pg_password: "Z2Cg3uF1Q6"
  pg_database: kong
  pg_ssl: false
  pg_ssl_verify: false
  
#安装
helm install kong  -f values.yaml .  -n kong
#查看
[root@master kong]# kubectl get pod -n kong
NAME                              READY   STATUS      RESTARTS   AGE
kong-kong-57989d4bf-bqftb         2/2     Running     0          12m
kong-kong-init-migrations-9zv9z   0/1     Completed   0          12m
postgresql-0                      1/1     Running     0          42m  

3.2 持久化配置文件(方便后期更改配置)

1.复制出配置文件(nginx-kong-conf文件)

kubectl  cp -c proxy  kong/`kubectl get pod -n kong |grep kong-|grep -v init|awk '{print $1}'`:/kong_prefix/nginx-kong.conf   ./nginx-kong.conf

2.创建configmap

kubectl create configmap  nginx-kong-conf -n kong --from-file=./nginx-kong.conf

3.修改kong的deploy,挂载配置文件(挂载到proxy容器下)

[root@master ~]# kubectl get deploy -n kong |grep kong
kong-kong     1/1     1            1           7h11m
konga         1/1     1            1           7h4m
[root@master ~]# kubectl edit deploy -n kong  kong-kong
[root@master ~]# kubectl get deploy -n kong kong-kong  -o yaml |grep  -C2 "nginx-kong"
        - mountPath: /tmp
          name: kong-kong-tmp
        - mountPath: /kong_prefix/nginx-kong.conf 
          name: nginx-kong-conf
          subPath: nginx-kong.conf
      dnsPolicy: ClusterFirst
      initContainers:
--
      - configMap:   
          defaultMode: 420
          name: nginx-kong-conf
        name: nginx-kong-conf

4.安装konga

4.1 初始化konga数据库

PG_HOST=`kubectl get svc -n kong  postgresql|awk -F' '+ 'NR==2{print $3}'`
docker run --rm pantsel/konga:latest -c prepare -a postgres -u postgresql://postgres:Z2Cg3uF1Q6@${PG_HOST}:5432/konga

4.2 konga yaml文件

# Konga dashboard for administering Kong. It connects to the shared
# PostgreSQL instance; the "konga" database must already exist (see the
# prepare step in section 4.1).
apiVersion: apps/v1
kind: Deployment
metadata:
  namespace: kong
  name: konga
spec:
  replicas: 1
  selector:
    matchLabels:
      app: konga
  template:
    metadata:
      labels:
        app: konga
    spec:
      containers:
        - name: konga
          image: pantsel/konga:latest
          imagePullPolicy: IfNotPresent
          env:
            # PostgreSQL connection settings.
            - name: DB_ADAPTER
              value: "postgres"
            - name: DB_DATABASE
              value: "konga"
            - name: DB_HOST
              value: "postgresql.kong.svc.cluster.local"
            - name: DB_PASSWORD
              value: "Z2Cg3uF1Q6"
            - name: DB_PORT
              value: "5432"
            - name: DB_USER
              value: "postgres"
            # Application settings.
            - name: KONGA_LOG_LEVEL
              value: "info"
            - name: NODE_ENV
              value: "production"
          ports:
            - name: konga
              containerPort: 1337
              protocol: TCP
          volumeMounts:
            # Sync container time zone with the node.
            - name: host-time
              mountPath: /etc/localtime
              readOnly: true
      volumes:
        - name: host-time
          hostPath:
            path: /etc/localtime
---
# Expose the Konga UI on every node at port 31337.
apiVersion: v1
kind: Service
metadata:
  namespace: kong
  name: konga
  labels:
    app: konga
spec:
  type: NodePort
  selector:
    app: konga
  ports:
    - name: konga-port
      protocol: TCP
      port: 1337
      nodePort: 31337

4.2 导入

kubectl apply -f  konga-deploy.yaml

4.3 查看

[root@master ~]# kubectl get pod,svc -n kong|grep konga
pod/konga-6fc9774cd4-8nt5d            1/1     Running     0          6m40s
service/konga                          NodePort       10.110.155.32   <none>        1337:31337/TCP                  6m40s

4.4浏览器访问测试

http://NODE_IP:31337

4.5 将kong-proxy修改为nodePort(方便访问测试,默认情况无法使用loadBalancer)

查看

[root@master ~]# kubectl get svc -n kong|grep kong-proxy
kong-kong-proxy                LoadBalancer   10.103.173.13    <pending>     80:31993/TCP,443:32435/TCP      7m5s

修改

kubectl  patch svc  kong-kong-proxy -n kong -p '{"spec":{"type":"NodePort","ports":[{"name":"kong-proxy","port":80,"protocol":"TCP","targetPort":8000,"nodePort":31993},{"name":"kong-proxy-tls","port":443,"protocol":"TCP","targetPort":8443,"nodePort":32435}]}}'

查看

[root@master ~]# kubectl get svc -n kong|grep kong-proxy
kong-kong-proxy                NodePort    10.103.173.13    <none>        80:31993/TCP,443:32435/TCP      13m

4.6 配置kong admin访问地址

1.查看svc地址

[root@master ~]# kubectl get svc -n kong |grep admin
kong-kong-admin                NodePort       10.100.244.82    <none>        8001:32276/TCP,8444:31398/TCP   16m

2.配置地址

image.png

5.使用

5.1 创建测试项目

# Plain nginx backend used to exercise Kong routing in the examples below.
apiVersion: v1
kind: Service
metadata:
  name: example-app
  labels:
    app: example-app
spec:
  selector:
    app: example-app
  ports:
    - name: web
      port: 80
---
apiVersion: apps/v1
kind: Deployment
metadata:
  name: example-app
spec:
  replicas: 1
  selector:
    matchLabels:
      app: example-app
  template:
    metadata:
      labels:
        app: example-app
    spec:
      containers:
        - name: example-app
          image: nginx:alpine
          ports:
            - name: web
              containerPort: 80

2.导入及测试

[root@master ~]# kubectl apply -f  test-nginx.yaml
service/example-app created
deployment.apps/example-app created
[root@master ~]# kubectl get pod |grep example-app
example-app-56c9b6d95b-9p292              1/1     Running   0          15s
[root@master ~]# kubectl get svc  example-app
NAME          TYPE        CLUSTER-IP      EXTERNAL-IP   PORT(S)   AGE
example-app   ClusterIP   10.99.139.214   <none>        80/TCP    62s
[root@master ~]# curl 10.99.139.214
<!DOCTYPE html>
<html>
<head>
<title>Welcome to nginx!</title>
<style>
html { color-scheme: light dark; }
body { width: 35em; margin: 0 auto;
font-family: Tahoma, Verdana, Arial, sans-serif; }
</style>
</head>
<body>
<h1>Welcome to nginx!</h1>
<p>If you see this page, the nginx web server is successfully installed and
working. Further configuration is required.</p>

<p>For online documentation and support please refer to
<a href="http://nginx.org/">nginx.org</a>.<br/>
Commercial support is available at
<a href="http://nginx.com/">nginx.com</a>.</p>

<p><em>Thank you for using nginx.</em></p>
</body>
</html>

3.创建测试域名访问

  • 使用nginx.kongtest.com域名测试
创建services

image.png

添加route

image.png

在本地的hosts文件中添加nginx.kongtest.com域名的hosts解析,访问浏览器测试

image.png

4.https配置

1.手动创建证书

openssl req -x509 -nodes -days 3650 -newkey rsa:2048 -keyout nginx.kongtest.com-key.key -out nginx.kongtest.com.pem -subj "/CN=nginx.kongtest.com"

2.查看证书文件

[root@master ~]# ll nginx.kongtest.com*
-rw-r--r-- 1 root root 1704 11月 14 09:54 nginx.kongtest.com-key.key
-rw-r--r-- 1 root root 1115 11月 14 09:54 nginx.kongtest.com.pem

3.在konga中添加证书

image.png

image.png

image.png

4.访问测试

image.png

6.插件使用

6.1 Basic Auth

1.创建 Consumers

image.png

2.创建账号密码

image.png

image.png

3.使用Basic Auth插件

image.png

image.png

  • 全部为空即可 image.png

image.png

4.访问测试

image.png

6.2 ip-restriction(ip黑白名单访问限制)

  • 注意:deny的优先级高于allow

1.创建

image.png

image.png

image.png

2.测试(将本地电脑的ip设置为黑名单测试)

  • 因为使用的nodeport,所以kong无法记录客户端真实ip,所以这里将externalTrafficPolicy设置为local
kubectl  patch svc  kong-kong-proxy -n kong -p '{"spec":{"externalTrafficPolicy":"Local"}}'
  • 本地测试(无法访问) image.png
  • 未设置黑名单ip访问(可以访问)

image.png

6.3 Rate Limiting (限流)

1.创建

  • 所有限制中,至少要填一个限制才行

image.png

image.png

2.测试(每分钟1次)

image.png

image.png

6.4 Request Transformer(路由重写)

1.在nginx容器中新建目录和文件

[root@master ~]# kubectl exec -it example-app-56c9b6d95b-bclbv sh
kubectl exec [POD] [COMMAND] is DEPRECATED and will be removed in a future version. Use kubectl exec [POD] -- [COMMAND] instead.
/ # mkdir /usr/share/nginx/html/a -p
/ # echo aaaaa >/usr/share/nginx/html/a/a.txt
/ # cat /usr/share/nginx/html/a/a.txt
aaaaa

2.访问测试

image.png

3.添加Request Transformer插件,将访问请求转发到/a/a.txt

image.png

image.png

4.测试

image.png

7.安装外部插件

1.外部插件安装条件

  • kong创建插件文档参考:www.cnblogs.com/huxianglin/…

  • kong创建插件必须包含2个文件

  • handler.lua (主要负责业务逻辑功能编写)

  • schema.lua (主要负责插件参数定制)

2.创建handler.lua和schema.lua文件

  • handler.lua
-- Skeleton Kong plugin handler. Kong invokes each function below in the
-- corresponding request-processing phase; most phases only emit a debug
-- log line so the plugin life cycle can be observed, while access/header_filter
-- demonstrate header manipulation driven by the plugin configuration.
local MyPluginHandler = {
  PRIORITY = 1000, -- relative execution order among installed plugins
  VERSION = "0.1", -- X.Y.Z format; checked for hybrid-mode compatibility
}

-- Runs once per nginx worker process at startup.
function MyPluginHandler:init_worker()
  kong.log.debug("saying hi from the 'init_worker' handler")
end

-- Runs during the TLS handshake phase.
function MyPluginHandler:certificate(plugin_conf)
  kong.log.debug("saying hi from the 'certificate' handler")
end

-- Runs in the rewrite phase, before the route has been matched.
function MyPluginHandler:rewrite(plugin_conf)
  kong.log.debug("saying hi from the 'rewrite' handler")
end

-- Runs once the route/service is resolved; sets a header on the
-- upstream request using the configured header name.
function MyPluginHandler:access(plugin_conf)
  kong.log.inspect(plugin_conf) -- check the logs for a pretty-printed config!
  kong.service.request.set_header(plugin_conf.request_header, "this is on a request")
end

-- Runs when response headers arrive from upstream; sets a response header.
function MyPluginHandler:header_filter(plugin_conf)
  kong.response.set_header(plugin_conf.response_header, "this is on the response")
end

-- Runs for each chunk of the response body.
function MyPluginHandler:body_filter(plugin_conf)
  kong.log.debug("saying hi from the 'body_filter' handler")
end

-- Runs after the response has been sent to the client.
function MyPluginHandler:log(plugin_conf)
  kong.log.debug("saying hi from the 'log' handler")
end

-- Hand the handler table back to Kong.
return MyPluginHandler
  • schema.lua
-- Schema for the "myplugin" custom plugin. Kong loads this to validate
-- the plugin's configuration when it is attached via the Admin API.
local typedefs = require "kong.db.schema.typedefs"


-- Must match the plugin's directory name under kong/plugins/.
local PLUGIN_NAME = "myplugin"


local schema = {
  name = PLUGIN_NAME,
  fields = {
    -- the 'fields' array is the top-level entry with fields defined by Kong
    { consumer = typedefs.no_consumer },  -- this plugin cannot be configured on a consumer (typical for auth plugins)
    { protocols = typedefs.protocols_http },  -- only allow http/https protocols
    { config = {
        -- The 'config' record is the custom part of the plugin schema
        type = "record",
        fields = {
          -- a standard defined field (typedef), with some customizations
          { request_header = typedefs.header_name {
              required = true,
              default = "Hello-World" } },
          { response_header = typedefs.header_name {
              required = true,
              default = "Bye-World" } },
          { ttl = { -- self defined field
              type = "integer",
              default = 600,
              required = true,
              gt = 0, }}, -- adding a constraint for the value
        },
        entity_checks = {
          -- add some validation rules across fields
          -- the following is silly because it is always true, since they are both required
          { at_least_one_of = { "request_header", "response_header" }, },
          -- We specify that both header-names cannot be the same
          { distinct = { "request_header", "response_header"} },
        },
      },
    },
  },
}

return schema

3.持久化kong插件目录

#查看kong插件目录(使用的/opt目录)
[root@master demo]# kubectl get deploy -nkong   kong-kong -o yaml|grep -A1  KONG_LUA_PACKAGE_PATH
        - name: KONG_LUA_PACKAGE_PATH
          value: /opt/?.lua;/opt/?/init.lua;;
--
        - name: KONG_LUA_PACKAGE_PATH
          value: /opt/?.lua;/opt/?/init.lua;;
--
        - name: KONG_LUA_PACKAGE_PATH
          value: /opt/?.lua;/opt/?/init.lua;;
#创建pvc,让/opt目录持久化(我这设置了默认动态存储,所以创建pvc会自动绑定pv)  
[root@master ~]# cat test-pvc.yaml
kind: PersistentVolumeClaim
apiVersion: v1
metadata:
  name: kong-pvc
  namespace: kong
spec:
  accessModes:
    - ReadWriteMany
  resources:
    requests:
      storage: 2Gi
[root@master ~]# kubectl  apply -f test-pvc.yaml
persistentvolumeclaim/kong-pvc created
[root@master ~]# kubectl get pvc -n kong
NAME         STATUS   VOLUME                                     CAPACITY   ACCESS MODES   STORAGECLASS          AGE
kong-pvc     Bound    pvc-38824676-1e65-4c53-8157-f1a6885e27f2   2Gi        RWX            managed-nfs-storage   4s
#修改kong deploy配置,挂载pvc
[root@master ~]# kubectl edit  deploy kong-kong -n kong
[root@master myplugin]# kubectl get   deploy kong-kong -n kong -o yaml|grep -C 3  kong-opt
          name: kong-kong-token
          readOnly: true
        - mountPath: /opt
          name: kong-opt
      - env:
        - name: KONG_ADMIN_ACCESS_LOG
          value: /dev/stdout
--
        - mountPath: /tmp
          name: kong-kong-tmp
        - mountPath: /opt
          name: kong-opt
      dnsPolicy: ClusterFirst
      initContainers:
      - command:
--
        - mountPath: /tmp
          name: kong-kong-tmp
        - mountPath: /opt
          name: kong-opt
      - args:
        - /bin/bash
        - -c
--
        - mountPath: /tmp
          name: kong-kong-tmp
        - mountPath: /opt
          name: kong-opt
      restartPolicy: Always
      schedulerName: default-scheduler
      securityContext: {}
--
        secret:
          defaultMode: 420
          secretName: kong-kong-validation-webhook-keypair
      - name: kong-opt
        persistentVolumeClaim:
          claimName: kong-pvc
status:

4.在容器中的/opt目录下创建kong/plugins目录,并在目录下创建myplugin插件目录

[root@master demo]# kubectl get pod -n kong  kong-kong-6c4576d5f7-th7bz
NAME                         READY   STATUS    RESTARTS   AGE
kong-kong-6c4576d5f7-th7bz   2/2     Running   0          4m33s
#创建/opt/kong/plugins/myplugin目录
[root@master demo]# kubectl exec -it -n kong  kong-kong-6c4576d5f7-th7bz -c proxy bash
kubectl exec [POD] [COMMAND] is DEPRECATED and will be removed in a future version. Use kubectl exec [POD] -- [COMMAND] instead.
kong@kong-kong-6c4576d5f7-th7bz:/$ mkdir -p /opt/kong/plugins/myplugin
5.将handler.lua和schema.lua文件复制到容器中的myplugin目录中
kubectl  cp   ./handler.lua   -c proxy kong/kong-kong-6c4576d5f7-th7bz:/opt/kong/plugins/myplugin
kubectl  cp   ./schema.lua   -c proxy kong/kong-kong-6c4576d5f7-th7bz:/opt/kong/plugins/myplugin

6.修改deploy中的KONG_PLUGINS变量的值,将myplugin插件添加到配置中

[root@master kong]# kubectl edit deploy kong-kong -n kong
[root@master kong]# kubectl  get  deploy kong-kong -n kong -o yaml|grep -A1 KONG_PLUGINS
        - name: KONG_PLUGINS
          value: bundled,myplugin
--
        - name: KONG_PLUGINS
          value: bundled,myplugin
--
        - name: KONG_PLUGINS
          value: bundled,myplugin

7.在konga中查看安装的myplugin插件

image.png

image.png

image.png

8.灰度发布

1.创建v1与v2版本的服务

  • v1 yaml配置内容
# Version 1 of the demo app: nginx serving a static "v1" index page.
apiVersion: v1
kind: Service
metadata:
  name: app-v1
  labels:
    app: app-v1
spec:
  selector:
    app: app-v1
  ports:
    - name: web
      port: 80
---
apiVersion: apps/v1
kind: Deployment
metadata:
  name: app-v1
spec:
  replicas: 1
  selector:
    matchLabels:
      app: app-v1
  template:
    metadata:
      labels:
        app: app-v1
    spec:
      containers:
        - name: app-v1
          image: nginx:alpine
          # Overwrite the index page so responses identify this version.
          command:
            - /bin/sh
            - -c
            - echo v1 > /usr/share/nginx/html/index.html && nginx -g "daemon off;"
          ports:
            - name: web
              containerPort: 80
  • v2 yaml配置内容
# Version 2 of the demo app: nginx serving a static "v2" index page.
apiVersion: v1
kind: Service
metadata:
  name: app-v2
  labels:
    app: app-v2
spec:
  selector:
    app: app-v2
  ports:
    - name: web
      port: 80
---
apiVersion: apps/v1
kind: Deployment
metadata:
  name: app-v2
spec:
  replicas: 1
  selector:
    matchLabels:
      app: app-v2
  template:
    metadata:
      labels:
        app: app-v2
    spec:
      containers:
        - name: app-v2
          image: nginx:alpine
          # Overwrite the index page so responses identify this version.
          command:
            - /bin/sh
            - -c
            - echo v2 > /usr/share/nginx/html/index.html && nginx -g "daemon off;"
          ports:
            - name: web
              containerPort: 80

2.导入及访问测试

[root@master ~]# kubectl  apply -f app-v1.yaml
service/app-v1 unchanged
deployment.apps/app-v1 unchanged
[root@master ~]# kubectl  apply -f app-v2.yaml
service/app-v2 created
deployment.apps/app-v2 created
[root@master ~]# kubectl get pod -o wide
NAME                                      READY   STATUS    RESTARTS   AGE     IP               NODE    NOMINATED NODE   READINESS GATES
app-v1-799cf87d69-82hqn                   1/1     Running   0          2m43s   10.244.166.141   node1   <none>           <none>
app-v2-5fd4c96dcd-xb7ch                   1/1     Running   0          10s     10.244.166.142   node1   <none>           <none>
[root@master ~]# kubectl get svc
NAME         TYPE        CLUSTER-IP       EXTERNAL-IP   PORT(S)   AGE
app-v1       ClusterIP   10.108.190.176   <none>        80/TCP    8m49s
app-v2       ClusterIP   10.98.98.95      <none>        80/TCP    24s
[root@master ~]# curl 10.108.190.176
v1
[root@master ~]# curl 10.98.98.95
v2

3.konga配置访问v1版本服务

  • 新建Upstreams配置 image.png image.png
  • 创建Services image.png
  • 创建Routes配置 image.png
  • 访问测试 image.png

4.新建灰度的Services、Upstreams、Routes配置

  • 灰度Upstreams配置

image.png image.png image.png

  • 灰度Services

image.png

image.png

  • 灰度Routes配置(这里添加一个app:huidu的Headers字段来区分)

image.png

image.png

5.访问测试

  • 正常访问是请求v1版本

image.png

image.png

  • 添加app:huidu的Headers字段访问v2版本

image.png

9.流量控制

  • 通过Upstreams的权重来控制流量比例

1.创建1个Upstreams,后端的Targets设置不同的权重(设置为5:1)

image.png

2.访问测试

image.png