记录在 kube-ovn kind 环境中,基于 kube-ovn eip 实现 ipsec site-to-site 场景的一个 demo 测试。
下面是网络拓扑
- moon 和 alice 在同一个 vpc1 子网 vpc1-subnet1 中
- sun 和 bob 在同一个 vpc2 子网 vpc2-subnet1 中
- mars 和 carrie 在同一个 vpc3 子网 vpc3-subnet1 中
moon sun mars 都是 ipsec vpn gw
1. 创建 kube-ovn 环境
本次测试的环境是基于 kube-ovn kind 直接拉起的,
make kind-init-ha; make kind-install-webhook; make ovn-vpc-nat-gw-conformance-e2e
▶ k get subnet
NAME PROVIDER VPC VLAN PROTOCOL CIDR PRIVATE NAT DEFAULT GATEWAYTYPE V4USED V4AVAILABLE V6USED V6AVAILABLE EXCLUDEIPS U2OINTERCONNECTIONIP
external ovn ovn-cluster vlan-144505001 IPv4 172.19.0.0/16 false false false distributed 9 65521 0 0 ["172.19.0.1","172.19.0.2","172.19.0.3","172.19.0.4"]
join ovn ovn-cluster IPv4 100.64.0.0/16 false false false 3 65530 0 0 ["100.64.0.1"]
no-bfd-subnet-156456980 ovn no-bfd-vpc-110691683 IPv4 192.168.0.0/24 false false false distributed 5 248 0 0 ["192.168.0.1"]
ovn-default ovn ovn-cluster IPv4 10.16.0.0/16 false true true distributed 9 65524 0 0 ["10.16.0.1"]
vpc1-subnet1 ovn vpc1 IPv4 10.1.0.0/24 false false false distributed 4 249 0 0 ["10.1.0.1"]
vpc2-subnet1 ovn vpc2 IPv4 10.2.0.0/24 false false false distributed 4 249 0 0 ["10.2.0.1"]
vpc3-subnet1 ovn vpc3 IPv4 10.3.0.0/24 false false false distributed 4 249 0 0 ["10.3.0.1"]
三个 vpc subnet 创建脚本
root@debian:guide/ipsec-vpn/01-vpc-subnet main ✗ 1d0h ◒
▶ cat 01-ns1-vpc1-subnet.yaml
kind: Vpc
apiVersion: kubeovn.io/v1
metadata:
name: vpc1
spec:
namespaces:
- ns1
enableExternal: true
staticRoutes:
- cidr: 10.2.0.0/24
nextHopIP: 10.1.0.11
policy: policyDst
- cidr: 10.3.0.0/24
nextHopIP: 10.1.0.11
policy: policyDst
---
apiVersion: kubeovn.io/v1
kind: Subnet
metadata:
name: vpc1-subnet1
spec:
cidrBlock: 10.1.0.0/24
default: false
disableGatewayCheck: false
disableInterConnection: true
enableEcmp: false
gatewayNode: ""
gatewayType: distributed
natOutgoing: false
private: false
protocol: IPv4
provider: ovn
vpc: vpc1
namespaces:
- ns1
(v.v)
root@debian:guide/ipsec-vpn/01-vpc-subnet main ✗ 1d0h ◒
▶ cat 02-ns1-vpc2-subnet.yaml
kind: Vpc
apiVersion: kubeovn.io/v1
metadata:
name: vpc2
spec:
namespaces:
- ns1
enableExternal: true
staticRoutes:
- cidr: 10.1.0.0/24
nextHopIP: 10.2.0.22
policy: policyDst
---
apiVersion: kubeovn.io/v1
kind: Subnet
metadata:
name: vpc2-subnet1
spec:
cidrBlock: 10.2.0.0/24
default: false
disableGatewayCheck: false
disableInterConnection: true
enableEcmp: false
gatewayNode: ""
gatewayType: distributed
natOutgoing: false
private: false
protocol: IPv4
provider: ovn
vpc: vpc2
namespaces:
- ns1
(v.v)
root@debian:guide/ipsec-vpn/01-vpc-subnet main ✗ 1d0h ◒
▶ cat 03-ns1-vpc3-subnet.yaml
kind: Vpc
apiVersion: kubeovn.io/v1
metadata:
name: vpc3
spec:
namespaces:
- ns1
enableExternal: true
staticRoutes:
- cidr: 10.1.0.0/24
nextHopIP: 10.3.0.33
policy: policyDst
---
apiVersion: kubeovn.io/v1
kind: Subnet
metadata:
name: vpc3-subnet1
spec:
cidrBlock: 10.3.0.0/24
default: false
disableGatewayCheck: false
disableInterConnection: true
enableEcmp: false
gatewayNode: ""
gatewayType: distributed
natOutgoing: false
private: false
protocol: IPv4
provider: ovn
vpc: vpc3
namespaces:
- ns1
(v.v)
root@debian:guide/ipsec-vpn/01-vpc-subnet main ✗
ipsec vpn 流量都需要经过各自 vpc 内部的 ipsec vpn 网关,所以需要在 kube-ovn 的 vpc 路由器上配置相应的静态路由,以下是路由信息:
root@debian:docs/guide/ipsec-vpn main ✗ 8h35m ◒
▶ k ko nbctl lr-route-list vpc1
IPv4 Routes
Route Table <main>:
10.2.0.0/24 10.1.0.11 dst-ip
10.3.0.0/24 10.1.0.11 dst-ip
(v.v)
root@debian:docs/guide/ipsec-vpn main ✗ 8h35m ◒
▶ k ko nbctl lr-route-list vpc2
IPv4 Routes
Route Table <main>:
10.1.0.0/24 10.2.0.22 dst-ip
(v.v)
root@debian:docs/guide/ipsec-vpn main ✗ 8h35m ◒
▶ k ko nbctl lr-route-list vpc3
IPv4 Routes
Route Table <main>:
10.1.0.0/24 10.3.0.33 dst-ip
root@debian:guide/ipsec-vpn/02-keepalived-no-fip main ✗
2. 基于一个 operator 拉起 ipsec vpn gw server
root@debian:guide/ipsec-vpn/02-ipsec-fip main ✗ 1d2h ◒
▶ cat 01-moon.yaml
---
kind: Vip
apiVersion: kubeovn.io/v1
metadata:
name: moon-keepalived-vip
spec:
subnet: vpc1-subnet1
v4ip: 10.1.0.11
---
kind: KeepAlived
apiVersion: vpn-gw.kubecombo.com/v1
metadata:
name: moon-keepalived
namespace: ns1
spec:
image: icoy/kube-combo-keepalived:v0.0.6
subnet: vpc1-subnet1
vipV4: 10.1.0.11
---
kind: OvnEip
apiVersion: kubeovn.io/v1
metadata:
name: moon
spec:
externalSubnet: external
v4Ip: 172.19.0.101
type: nat
---
kind: OvnFip
apiVersion: kubeovn.io/v1
metadata:
name: moon
spec:
ovnEip: moon
ipName: moon-0.ns1
---
kind: VpnGw
apiVersion: vpn-gw.kubecombo.com/v1
metadata:
name: moon
namespace: ns1
spec:
workloadType: statefulset
cpu: "1"
memory: "1024M"
qosBandwidth: "20"
replicas: 2
enableIpsecVpn: true
ipsecSecret: moon-ipsec-vpn
ipsecVpnImage: icoy/kube-combo-strongswan:v0.0.6
keepalived: moon-keepalived
---
kind: IpsecConn
apiVersion: vpn-gw.kubecombo.com/v1
metadata:
name: moon-sun
namespace: ns1
spec:
vpnGw: moon
# pubkey use cert-manager x509 ca, support ipv4 ipv6
# psk use pre shared key, not tested
auth: pubkey
# default should be safe enough
proposals: default
# ike version 0, 1, 2
ikeVersion: "2"
localCN: moon.vpn.gw.com
localPublicIp: 172.19.0.101
localPrivateCidrs: 10.1.0.0/24
remoteCN: sun.vpn.gw.com
remotePublicIp: 172.19.0.102
remotePrivateCidrs: 10.2.0.0/24
---
kind: IpsecConn
apiVersion: vpn-gw.kubecombo.com/v1
metadata:
name: moon-mars
namespace: ns1
spec:
vpnGw: moon
# pubkey use cert-manager x509 ca, support ipv4 ipv6
# psk use pre shared key, not tested
auth: pubkey
# default should be safe enough
proposals: default
# ike version 0, 1, 2
ikeVersion: "2"
localCN: moon.vpn.gw.com
localPublicIp: 172.19.0.101
localPrivateCidrs: 10.1.0.0/24
remoteCN: mars.vpn.gw.com
remotePublicIp: 172.19.0.103
remotePrivateCidrs: 10.3.0.0/24
(v.v)
root@debian:guide/ipsec-vpn/02-ipsec-fip main ✗ 1d2h ◒
▶ cat 02-sun.yaml
---
kind: Vip
apiVersion: kubeovn.io/v1
metadata:
name: sun-keepalived-vip
spec:
subnet: vpc2-subnet1
v4ip: 10.2.0.22
---
kind: KeepAlived
apiVersion: vpn-gw.kubecombo.com/v1
metadata:
name: sun-keepalived
namespace: ns1
spec:
image: icoy/kube-combo-keepalived:v0.0.6
subnet: vpc2-subnet1
vipV4: 10.2.0.22
---
kind: OvnEip
apiVersion: kubeovn.io/v1
metadata:
name: sun
spec:
externalSubnet: external
v4Ip: 172.19.0.102
type: nat
---
kind: OvnFip
apiVersion: kubeovn.io/v1
metadata:
name: sun
spec:
ovnEip: sun
ipName: sun-0.ns1
---
kind: VpnGw
apiVersion: vpn-gw.kubecombo.com/v1
metadata:
name: sun
namespace: ns1
spec:
cpu: "1"
memory: "1024M"
qosBandwidth: "20"
replicas: 2
workloadType: statefulset
enableIpsecVpn: true
ipsecSecret: sun-ipsec-vpn
ipsecVpnImage: icoy/kube-combo-strongswan:v0.0.6
keepalived: sun-keepalived
---
kind: IpsecConn
apiVersion: vpn-gw.kubecombo.com/v1
metadata:
name: sun-moon
namespace: ns1
spec:
vpnGw: sun
# pubkey use cert-manager x509 ca, support ipv4 ipv6
# psk use pre shared key, not tested
auth: pubkey
# default should be safe enough
proposals: default
# ike version 0, 1, 2
ikeVersion: "2"
localCN: sun.vpn.gw.com
localPublicIp: 172.19.0.102
localPrivateCidrs: 10.2.0.0/24
remoteCN: moon.vpn.gw.com
remotePublicIp: 172.19.0.101
remotePrivateCidrs: 10.1.0.0/24
(v.v)
root@debian:guide/ipsec-vpn/02-ipsec-fip main ✗ 1d2h ◒
▶ cat 03-mars.yaml
kind: Vip
apiVersion: kubeovn.io/v1
metadata:
name: mars-keepalived-vip
spec:
subnet: vpc3-subnet1
v4ip: 10.3.0.33
---
kind: KeepAlived
apiVersion: vpn-gw.kubecombo.com/v1
metadata:
name: mars-keepalived
namespace: ns1
spec:
image: icoy/kube-combo-keepalived:v0.0.6
subnet: vpc3-subnet1
vipV4: 10.3.0.33
---
kind: OvnEip
apiVersion: kubeovn.io/v1
metadata:
name: mars
spec:
externalSubnet: external
  type: nat
v4Ip: 172.19.0.103
---
kind: OvnFip
apiVersion: kubeovn.io/v1
metadata:
name: mars
spec:
ovnEip: mars
ipName: mars-0.ns1
---
kind: VpnGw
apiVersion: vpn-gw.kubecombo.com/v1
metadata:
name: mars
namespace: ns1
spec:
workloadType: statefulset
cpu: "1"
memory: "1024M"
qosBandwidth: "20"
enableIpsecVpn: true
replicas: 2
ipsecSecret: mars-ipsec-vpn
ipsecVpnImage: icoy/kube-combo-strongswan:v0.0.6
keepalived: mars-keepalived
---
kind: IpsecConn
apiVersion: vpn-gw.kubecombo.com/v1
metadata:
name: mars-moon
namespace: ns1
spec:
vpnGw: mars
# pubkey use cert-manager x509 ca, support ipv4 ipv6
# psk use pre shared key, not tested
auth: pubkey
# default should be safe enough
proposals: default
# ike version 0, 1, 2
ikeVersion: "2"
localCN: mars.vpn.gw.com
localPublicIp: 172.19.0.103
localPrivateCidrs: 10.3.0.0/24
remoteCN: moon.vpn.gw.com
remotePublicIp: 172.19.0.101
remotePrivateCidrs: 10.1.0.0/24
(v.v)
root@debian:guide/ipsec-vpn/02-ipsec-fip main ✗
观察者:部署在各 vpc 子网中、用于验证跨 vpc 连通性的测试 Pod(alice、bob、carrie)
root@debian:guide/ipsec-vpn/02-ipsec-fip main ✗ 1d2h ◒
▶ cat 04-alice-bob-carrie.yaml
apiVersion: apps/v1
kind: Deployment
metadata:
name: alice
namespace: ns1
labels:
app: alice
spec:
replicas: 1
selector:
matchLabels:
app: alice
template:
metadata:
labels:
app: alice
annotations:
ovn.kubernetes.io/logical_switch: vpc1-subnet1
spec:
containers:
- name: nginx
image: nginx:latest
imagePullPolicy: Never
ports:
- containerPort: 80
- name: netshoot
image: nicolaka/netshoot
imagePullPolicy: Never
command: ["/bin/bash"]
args: ["-c", "while true; do ping localhost; sleep 60;done"]
---
apiVersion: apps/v1
kind: Deployment
metadata:
name: bob
namespace: ns1
labels:
app: bob
spec:
replicas: 1
selector:
matchLabels:
app: bob
template:
metadata:
labels:
app: bob
annotations:
ovn.kubernetes.io/logical_switch: vpc2-subnet1
spec:
containers:
- name: nginx
image: nginx:latest
imagePullPolicy: Never
ports:
- containerPort: 80
- name: netshoot
image: nicolaka/netshoot
imagePullPolicy: Never
command: ["/bin/bash"]
args: ["-c", "while true; do ping localhost; sleep 60;done"]
---
apiVersion: apps/v1
kind: Deployment
metadata:
name: carrie
namespace: ns1
labels:
app: carrie
spec:
replicas: 1
selector:
matchLabels:
app: carrie
template:
metadata:
labels:
app: carrie
annotations:
ovn.kubernetes.io/logical_switch: vpc3-subnet1
spec:
containers:
- name: nginx
image: nginx:latest
imagePullPolicy: Never
ports:
- containerPort: 80
- name: netshoot
image: nicolaka/netshoot
imagePullPolicy: Never
command: ["/bin/bash"]
args: ["-c", "while true; do ping localhost; sleep 60;done"]
3. 测试 ipsec server gw 之间的内网连通性
moon connect sun and mars ok
可以看到连通是没问题的。早期测试中曾观察到丢包(且不是首包丢包);最终测试发送 1k 个包,丢包率为 0。
4. alice --> bob, carrie
发送 1k 个包,丢包率为 0,但出现了错误包;错误包集中在前几个包,之后未再复现。