cilium AIO 环境信息记录

阅读量 64 · 预计阅读时长 22 分钟

1. 环境

k8s



root@cili-control:~# k get node -A -o wide
NAME           STATUS   ROLES                  AGE   VERSION   INTERNAL-IP   EXTERNAL-IP   OS-IMAGE             KERNEL-VERSION       CONTAINER-RUNTIME
cili-control   Ready    control-plane,worker   3h    v1.33.1   11.0.1.132    <none>        Ubuntu 22.04.5 LTS   5.15.0-160-generic   containerd://1.7.13
root@cili-control:~# k get po -A -o wide
NAMESPACE     NAME                                   READY   STATUS    RESTARTS   AGE   IP              NODE           NOMINATED NODE   READINESS GATES
kube-system   cilium-k9xj8                           1/1     Running   0          3h    11.0.1.132      cili-control   <none>           <none>
kube-system   cilium-operator-7c6b45754-jbqbw        1/1     Running   0          3h    11.0.1.132      cili-control   <none>           <none>
kube-system   coredns-58f4c86d4d-4qxgh               1/1     Running   0          3h    10.233.64.53    cili-control   <none>           <none>
kube-system   coredns-58f4c86d4d-sqfbb               1/1     Running   0          3h    10.233.64.232   cili-control   <none>           <none>
kube-system   kube-apiserver-cili-control            1/1     Running   0          3h    11.0.1.132      cili-control   <none>           <none>
kube-system   kube-controller-manager-cili-control   1/1     Running   0          3h    11.0.1.132      cili-control   <none>           <none>
kube-system   kube-multus-ds-rhj2x                   1/1     Running   0          3h    11.0.1.132      cili-control   <none>           <none>
kube-system   kube-proxy-888gs                       1/1     Running   0          3h    11.0.1.132      cili-control   <none>           <none>
kube-system   kube-scheduler-cili-control            1/1     Running   0          3h    11.0.1.132      cili-control   <none>           <none>
kube-system   nodelocaldns-84l6g                     1/1     Running   0          3h    11.0.1.132      cili-control   <none>           <none>
root@cili-control:~# k get po -A -o wide | grep cilium
kube-system   cilium-k9xj8                           1/1     Running   0          3h    11.0.1.132      cili-control   <none>           <none>
kube-system   cilium-operator-7c6b45754-jbqbw        1/1     Running   0          3h    11.0.1.132      cili-control   <none>           <none>



root@cili-control:~# k get crd -A -o wide | grep cilium
ciliumcidrgroups.cilium.io                       2025-10-19T06:43:41Z
ciliumclusterwidenetworkpolicies.cilium.io       2025-10-19T06:43:41Z
ciliumendpoints.cilium.io                        2025-10-19T06:43:41Z
ciliumexternalworkloads.cilium.io                2025-10-19T06:43:41Z
ciliumidentities.cilium.io                       2025-10-19T06:43:41Z
ciliuml2announcementpolicies.cilium.io           2025-10-19T06:43:41Z
ciliumloadbalancerippools.cilium.io              2025-10-19T06:43:41Z
ciliumnetworkpolicies.cilium.io                  2025-10-19T06:43:41Z
ciliumnodeconfigs.cilium.io                      2025-10-19T06:43:41Z
ciliumnodes.cilium.io                            2025-10-19T06:43:41Z
ciliumpodippools.cilium.io                       2025-10-19T06:43:41Z


2. 网络


root@cili-control:~# route -n
Kernel IP routing table
Destination     Gateway         Genmask         Flags Metric Ref    Use Iface
0.0.0.0         11.0.1.2        0.0.0.0         UG    100    0        0 ens33
10.233.64.0     10.233.64.147   255.255.255.0   UG    0      0        0 cilium_host
10.233.64.147   0.0.0.0         255.255.255.255 UH    0      0        0 cilium_host
11.0.1.0        0.0.0.0         255.255.255.0   U     100    0        0 ens33
11.0.1.2        0.0.0.0         255.255.255.255 UH    100    0        0 ens33
root@cili-control:~# 
root@cili-control:~# ip a
1: lo: <LOOPBACK,UP,LOWER_UP> mtu 65536 qdisc noqueue state UNKNOWN group default qlen 1000
    link/loopback 00:00:00:00:00:00 brd 00:00:00:00:00:00
    inet 127.0.0.1/8 scope host lo
       valid_lft forever preferred_lft forever
    inet6 ::1/128 scope host 
       valid_lft forever preferred_lft forever
2: ens33: <BROADCAST,MULTICAST,UP,LOWER_UP> mtu 1500 qdisc fq_codel state UP group default qlen 1000
    link/ether 00:0c:29:c6:51:a4 brd ff:ff:ff:ff:ff:ff
    altname enp2s1
    inet 11.0.1.132/24 metric 100 brd 11.0.1.255 scope global dynamic ens33
       valid_lft 1532sec preferred_lft 1532sec
    inet6 fe80::20c:29ff:fec6:51a4/64 scope link 
       valid_lft forever preferred_lft forever
3: nodelocaldns: <BROADCAST,NOARP> mtu 1500 qdisc noop state DOWN group default 
    link/ether 56:3c:49:e5:6c:d2 brd ff:ff:ff:ff:ff:ff
    inet 169.254.25.10/32 scope global nodelocaldns
       valid_lft forever preferred_lft forever
4: kube-ipvs0: <BROADCAST,NOARP> mtu 1500 qdisc noop state DOWN group default 
    link/ether ca:7c:9f:83:93:25 brd ff:ff:ff:ff:ff:ff
    inet 10.233.0.1/32 scope global kube-ipvs0
       valid_lft forever preferred_lft forever
    inet 10.233.0.3/32 scope global kube-ipvs0
       valid_lft forever preferred_lft forever
    inet 10.233.14.40/32 scope global kube-ipvs0
       valid_lft forever preferred_lft forever
5: cilium_net@cilium_host: <BROADCAST,MULTICAST,NOARP,UP,LOWER_UP> mtu 1500 qdisc noqueue state UP group default qlen 1000
    link/ether 4e:ba:a2:c9:52:fd brd ff:ff:ff:ff:ff:ff
    inet6 fe80::4cba:a2ff:fec9:52fd/64 scope link 
       valid_lft forever preferred_lft forever
6: cilium_host@cilium_net: <BROADCAST,MULTICAST,NOARP,UP,LOWER_UP> mtu 1500 qdisc noqueue state UP group default qlen 1000
    link/ether d2:77:69:cb:86:2b brd ff:ff:ff:ff:ff:ff
    inet 10.233.64.147/32 scope global cilium_host
       valid_lft forever preferred_lft forever
    inet6 fe80::d077:69ff:fecb:862b/64 scope link 
       valid_lft forever preferred_lft forever
7: cilium_vxlan: <BROADCAST,MULTICAST,UP,LOWER_UP> mtu 1500 qdisc noqueue state UNKNOWN group default qlen 1000
    link/ether ca:b6:50:d1:22:92 brd ff:ff:ff:ff:ff:ff
    inet6 fe80::c8b6:50ff:fed1:2292/64 scope link 
       valid_lft forever preferred_lft forever
9: lxc_health@if8: <BROADCAST,MULTICAST,UP,LOWER_UP> mtu 1500 qdisc noqueue state UP group default qlen 1000
    link/ether 86:78:f4:06:cb:12 brd ff:ff:ff:ff:ff:ff link-netnsid 0
    inet6 fe80::8478:f4ff:fe06:cb12/64 scope link 
       valid_lft forever preferred_lft forever
11: lxc507234951551@if10: <BROADCAST,MULTICAST,UP,LOWER_UP> mtu 1500 qdisc noqueue state UP group default qlen 1000
    link/ether 4a:85:e2:59:de:ed brd ff:ff:ff:ff:ff:ff link-netns cni-2ae79087-e613-51fb-9a26-97fae4e1a173
    inet6 fe80::4885:e2ff:fe59:deed/64 scope link 
       valid_lft forever preferred_lft forever
13: lxc3b598c0e6778@if12: <BROADCAST,MULTICAST,UP,LOWER_UP> mtu 1500 qdisc noqueue state UP group default qlen 1000
    link/ether 76:60:e9:61:f1:20 brd ff:ff:ff:ff:ff:ff link-netns cni-83cccf1c-69c1-ec62-9acb-d7d5a98b4737
    inet6 fe80::7460:e9ff:fe61:f120/64 scope link 
       valid_lft forever preferred_lft forever


cilium 数据面主要基于 eBPF,目标是替代 iptables 和 ipvs(NOTE(review):本环境中 kube-proxy 仍在运行,且存在 kube-ipvs0 接口;是否完全绕过 iptables/ipvs 取决于 kube-proxy replacement 的配置,待确认)

image.png

Cilium 路由的作用

image.png

image.png

Cilium 是基于 eBPF 的容器网络解决方案(常用于 Kubernetes),其路由的核心作用是实现 Pod 网络、主机网络与外部网络之间的流量转发,并为 eBPF 程序提供流量管控(如网络策略、负载均衡)的入口

  • 通过宿主机上的 cilium_host 接口: 实现 host <--> pod,
  • pod 网关就是该接口 ip,所以 pod (跨网段)出入的流量都会经过该接口

从提供的route -n输出来看,关键 Cilium 路由如下:

  1. 10.233.64.0/24 网段路由:目标网段为节点上的 Pod 网络(Kubernetes Pod 通常分配在独立网段),网关指向10.233.64.147(cilium_host 接口的 IP),接口为 cilium_host

    • 作用:所有发往该 Pod 网段的流量,会被引导至cilium_host接口,由 Cilium 的 eBPF 程序处理(如检查网络策略、转发到目标 Pod)。
  2. 10.233.64.147/32主机路由:目标为cilium_host接口自身的 IP,网关为0.0.0.0(直连),接口为cilium_host

    • 作用:标识cilium_host接口的 IP 地址,确保主机自身能通过该接口与 Pod 网络通信(如主机访问本地 Pod)。

Cilium 核心接口

image.png

  • cilium_net@cilium_host 和 cilium_host@cilium_net:一对 veth(虚拟以太网)接口(veth 对用于不同网络命名空间间通信)。

    • cilium_net:Cilium 网络栈的 “内部端”,负责与其他 Cilium 组件(如cilium_vxlan)交互。
    • cilium_host:绑定 10.233.64.147/32 这个 IP,是主机在 Pod 网络中的 “身份标识”,所有 Pod 网络的流量需经过该接口,由 eBPF 程序处理(如网络策略检查、流量统计)。
  • cilium_vxlan:VXLAN 隧道接口,用于跨节点 Pod 通信。

    • 作用:在多节点 Kubernetes 集群中,不同节点的 Pod 属于同一逻辑网络(如 10.233.0.0/16),VXLAN 通过将 Pod 数据包封装在 UDP 中,实现跨节点的二层通信(仿佛 Pod 在同一局域网)。

LXC 开头的接口

LXC 相关接口(Cilium 管理的 Pod 网络接口)

image.png

image.png

LXC 你可以理解为是一种命名习惯的继承

Cilium 通过类似 LXC 的命名方式管理 Pod 的 veth 接口(veth 对的主机端),格式为lxc<随机ID>@if<索引>

  • lxc_health@if8:Cilium 健康检查专用接口,对应健康检查容器(或网络命名空间)的 veth 对主机端。

    • 作用:用于 Cilium 自身网络状态监控(如检查网络连通性、eBPF 程序有效性)。
  • lxc507234951551@if10 和 lxc3b598c0e6778@if12:连接具体 Pod 的 veth 接口(主机端)。

    • @if10/@if12:表示 veth 对的另一端接口索引(在 Pod 的网络命名空间内,通常名为eth0)。
    • link-netns:指向对应 Pod 的网络命名空间 ID,确保流量正确进入 Pod。
    • 作用:作为主机与 Pod 之间的 “数据通道”,所有进出 Pod 的流量都通过这些接口传输,Cilium 的 eBPF 程序会在此处拦截流量并应用网络策略。

mtu 都是 1500

host 网卡,host cilium 接口,pod 网卡都是 1500

image.png

Cilium 通过路由引导 Pod 网络流量至自身接口(如cilium_host),再通过 veth 对(cilium_net/cilium_host)、VXLAN 隧道(cilium_vxlan)和 Pod 专用 veth(lxcxxxx)实现流量转发,同时基于 eBPF 在这些接口上提供网络策略、监控等核心功能。

3. CRD

CRD 一览


root@cili-control:~# k get crd -A -o wide | grep cilium
ciliumcidrgroups.cilium.io                       2025-10-19T06:43:41Z
ciliumclusterwidenetworkpolicies.cilium.io       2025-10-19T06:43:41Z
ciliumendpoints.cilium.io                        2025-10-19T06:43:41Z
ciliumexternalworkloads.cilium.io                2025-10-19T06:43:41Z
ciliumidentities.cilium.io                       2025-10-19T06:43:41Z
ciliuml2announcementpolicies.cilium.io           2025-10-19T06:43:41Z
ciliumloadbalancerippools.cilium.io              2025-10-19T06:43:41Z
ciliumnetworkpolicies.cilium.io                  2025-10-19T06:43:41Z
ciliumnodeconfigs.cilium.io                      2025-10-19T06:43:41Z
ciliumnodes.cilium.io                            2025-10-19T06:43:41Z
ciliumpodippools.cilium.io                       2025-10-19T06:43:41Z


以下是每个 CRD 的详细功能和作用:

1. ciliumcidrgroups.cilium.io

功能:定义一组 CIDR(无类别域间路由)地址块的集合。

作用:将多个 CIDR 地址(如 10.0.0.0/24、192.168.1.0/24)归类为一个逻辑组,

方便在网络策略中统一引用。例如,在CiliumNetworkPolicy中通过引用 CIDR 组,

可快速允许 / 拒绝一组 IP 地址的流量,避免重复配置单个 CIDR。

2. ciliumclusterwidenetworkpolicies.cilium.io

功能:定义集群级别的网络策略

作用:Kubernetes 原生NetworkPolicy是命名空间级别的(仅作用于单个命名空间),而该 CRD 支持跨命名空间的全局策略,可控制整个集群内所有 Pod、服务之间的流量。

适用于需要统一管控集群全域流量的场景(如禁止所有命名空间的 Pod 访问外部公网)。

3. ciliumendpoints.cilium.io

功能:表示 Cilium 管理的网络端点(对应 Kubernetes 中的 Pod 或外部工作负载)。

作用:每个 Pod(或通过ciliumexternalworkloads定义的外部工作负载)在 Cilium 中会生成一个CiliumEndpoint资源,记录该端点的网络配置(如 IP 地址、MAC 地址)、身份标识(关联ciliumidentities)、所属网络策略等信息。

image.png

Cilium 通过该资源实现对端点的细粒度网络控制和状态监控。


root@cili-control:~# k get ciliumendpoints -n kube-system   coredns-58f4c86d4d-4qxgh -o yaml
apiVersion: cilium.io/v2
kind: CiliumEndpoint
metadata:
  creationTimestamp: "2025-10-19T06:44:04Z"
  generation: 1
  labels:
    k8s-app: kube-dns
    pod-template-hash: 58f4c86d4d
  name: coredns-58f4c86d4d-4qxgh
  namespace: kube-system
  ownerReferences:
  - apiVersion: v1
    kind: Pod
    name: coredns-58f4c86d4d-4qxgh
    uid: 7d509401-8c17-4b3d-a7e6-81d32a9a8696
  resourceVersion: "605"
  uid: 3f5dc417-f2f2-40d3-9ff6-ee5511b9dec9
status:
  encryption: {}
  external-identifiers:
    cni-attachment-id: 9df7223d4f3368089934339829fb7e8f6b7a251f26108a2110cb4c3d32b9dc2f:eth0
    container-id: 9df7223d4f3368089934339829fb7e8f6b7a251f26108a2110cb4c3d32b9dc2f
    k8s-namespace: kube-system
    k8s-pod-name: coredns-58f4c86d4d-4qxgh
    pod-name: kube-system/coredns-58f4c86d4d-4qxgh
  id: 1605
  identity:
    id: 52314
    labels:
    - k8s:io.cilium.k8s.namespace.labels.kubernetes.io/metadata.name=kube-system
    - k8s:io.cilium.k8s.policy.cluster=default
    - k8s:io.cilium.k8s.policy.serviceaccount=coredns
    - k8s:io.kubernetes.pod.namespace=kube-system
    - k8s:k8s-app=kube-dns
  named-ports:
  - name: dns
    port: 53
    protocol: UDP
  - name: dns-tcp
    port: 53
    protocol: TCP
  - name: metrics
    port: 9153
    protocol: TCP
  networking:
    addressing:
    - ipv4: 10.233.64.53
    node: 11.0.1.132
  policy:
    egress:
      enforcing: false
      state: <status disabled>
    ingress:
      enforcing: false
      state: <status disabled>
  state: ready
  visibility-policy-status: <status disabled>

4. ciliumexternalworkloads.cilium.io

功能:定义集群外的工作负载(如物理机、虚拟机、非 K8s 容器)。

作用:将集群外的 workload 纳入 Cilium 的网络和安全管理范围,为其分配身份标识(关联ciliumidentities)和 IP 地址,使其能与集群内 Pod 通过统一的网络策略通信。例如,允许物理机上的应用安全访问 K8s 集群内的服务。

5. ciliumidentities.cilium.io

功能:定义 Cilium 用于安全策略的 “身份标识”。

作用:Cilium 基于 Pod / 外部工作负载的标签(如app=nginx)生成唯一身份(Identity),替代传统基于 IP 的策略控制。身份与标签动态绑定(IP 可能变化,但标签不变则身份不变),使网络策略更灵活(如 “允许所有app=frontend的 Pod 访问app=backend的 Pod”)。

该 CRD 记录身份与标签的映射关系。



root@cili-control:~# k get ciliumidentities -A -o wide
NAME    NAMESPACE     AGE
52314   kube-system   5h29m
root@cili-control:~# 
root@cili-control:~# 
root@cili-control:~# 
root@cili-control:~# 
root@cili-control:~# 
root@cili-control:~# 
root@cili-control:~# 
root@cili-control:~# k get ciliumidentities 52314 -o yaml
apiVersion: cilium.io/v2
kind: CiliumIdentity
metadata:
  creationTimestamp: "2025-10-19T06:44:04Z"
  generation: 1
  labels:
    io.cilium.k8s.policy.cluster: default
    io.cilium.k8s.policy.serviceaccount: coredns
    io.kubernetes.pod.namespace: kube-system
    k8s-app: kube-dns
  name: "52314"
  resourceVersion: "603"
  uid: f51673de-0c9a-49ec-b721-818eef87cbc4
security-labels:
  k8s:io.cilium.k8s.namespace.labels.kubernetes.io/metadata.name: kube-system
  k8s:io.cilium.k8s.policy.cluster: default
  k8s:io.cilium.k8s.policy.serviceaccount: coredns
  k8s:io.kubernetes.pod.namespace: kube-system
  k8s:k8s-app: kube-dns


6. ciliuml2announcementpolicies.cilium.io

功能:配置二层(L2)网络中的地址通告策略

作用:控制 Cilium 如何在二层网络中通告 IP 地址(如 Pod IP、Service ClusterIP),确保外部网络(如物理交换机、路由器)能感知这些 IP 的位置,实现跨二层网络的通信。例如,在使用 BGP 或 ARP 通告时,定义哪些 IP 需要被广播到外部网络

热迁移需要意识到这些策略的影响: BGP(ECMP) + ACL(限制新的 vm 端口,直到热迁移完毕)

7. ciliumloadbalancerippools.cilium.io

功能:管理分配给LoadBalancer类型服务的 IP 地址池。

作用:当 Kubernetes 创建LoadBalancer类型的 Service 时,Cilium 可从该 CRD 定义的 IP 池中分配外部 IP,用于暴露服务到集群外。支持配置 IP 范围、排除特定 IP 等,解决外部负载均衡 IP 地址管理的问题。

8. ciliumnetworkpolicies.cilium.io

功能:Cilium 扩展的网络策略(比 K8s 原生策略功能更强)。

作用:支持多层级流量控制,包括:

  • L3/L4(网络 / 传输层):基于 IP、端口、协议的策略
  • L7(应用层):基于 HTTP 路径、gRPC 方法、DNS 域名等的策略(如 “仅允许GET /api/v1请求”);
  • 基于身份(关联ciliumidentities):直接通过标签定义策略,无需关心 IP 变化;
  • 跨命名空间 / 外部工作负载:控制集群内外的流量交互。

9. ciliumnodeconfigs.cilium.io

功能:定义节点级别的 Cilium 配置(覆盖全局配置)。

作用:为集群中的特定节点设置 Cilium 运行参数,例如:网络接口(eth0)、隧道模式(VXLAN/Geneve)、eBPF 程序加载路径等。适用于节点硬件 / 网络环境不一致的场景(如部分节点需要使用独立的隧道接口)。

10. ciliumnodes.cilium.io

功能:记录 Cilium 在节点上的运行状态和网络信息。

作用:是 Cilium 对 Kubernetes Node 资源的扩展,存储节点的 Cilium 代理状态(如是否就绪)、分配的 Pod CIDR 范围、二层网络信息(如 MAC 地址)等,帮助 Cilium 控制平面感知节点网络状态,实现跨节点流量调度。

root@cili-control:~# k get ciliumnodes -A -o wide 
NAME           CILIUMINTERNALIP   INTERNALIP   AGE
cili-control   10.233.64.147      11.0.1.132   5h35m
root@cili-control:~# 
root@cili-control:~# k get ciliumnodes cili-control -o yaml
apiVersion: cilium.io/v2
kind: CiliumNode
metadata:
  creationTimestamp: "2025-10-19T06:43:48Z"
  generation: 3
  labels:
    beta.kubernetes.io/arch: amd64
    beta.kubernetes.io/os: linux
    kubernetes.io/arch: amd64
    kubernetes.io/hostname: cili-control
    kubernetes.io/os: linux
    node-role.kubernetes.io/control-plane: ""
    node-role.kubernetes.io/worker: ""
    node.kubernetes.io/exclude-from-external-load-balancers: ""
  name: cili-control
  ownerReferences:
  - apiVersion: v1
    kind: Node
    name: cili-control
    uid: 42c4c5fe-feb7-4af2-a306-96952945a05d
  resourceVersion: "559"
  uid: 9edd7fc9-6df3-4f08-9350-12c56202a45c
spec:
  addresses:
  - ip: 11.0.1.132 # k8s node 管理 ip
    type: InternalIP
  - ip: 10.233.64.147
    type: CiliumInternalIP # node 上的 pod 网关
  alibaba-cloud: {}
  azure: {}
  bootid: 87c25842-2aac-4dc7-9d8e-44f1d1425a93
  encryption: {}
  eni: {}
  health:
    ipv4: 10.233.64.236
  ingress: {}
  ipam:
    podCIDRs:
    - 10.233.64.0/24
    pools: {}
status:
  alibaba-cloud: {}
  azure: {}
  eni: {}
  ipam:
    operator-status: {}



11. ciliumpodippools.cilium.io

功能:管理分配给 Pod 的 IP 地址池。

作用:定义 Pod 可使用的 IP 地址范围,Cilium 会从这些池中为新建 Pod 分配 IP。支持按命名空间、节点划分不同 IP 池(如 “命名空间prod的 Pod 只能使用10.10.0.0/24”),实现 Pod IP 的精细化管理,避免 IP 冲突。

这些 CRD 共同构成了 Cilium 在 Kubernetes 中的核心控制平面,通过扩展 Kubernetes API 实现了灵活的网络编排、身份化安全策略和跨环境 workload 管理。

4. cilium 组件

image.png

  • cilium: agent
  • cilium-operator: 控制器

agent


root@cili-control:~# k get po -n kube-system   cilium-k9xj8 -o yaml
apiVersion: v1
kind: Pod
metadata:
  annotations:
    container.apparmor.security.beta.kubernetes.io/apply-sysctl-overwrites: unconfined
    container.apparmor.security.beta.kubernetes.io/cilium-agent: unconfined
    container.apparmor.security.beta.kubernetes.io/clean-cilium-state: unconfined
    container.apparmor.security.beta.kubernetes.io/mount-cgroup: unconfined
  creationTimestamp: "2025-10-19T06:43:37Z"
  generateName: cilium-
  generation: 1
  labels:
    app.kubernetes.io/name: cilium-agent
    app.kubernetes.io/part-of: cilium
    controller-revision-hash: 5cf9699494
    k8s-app: cilium
    pod-template-generation: "1"
  name: cilium-k9xj8
  namespace: kube-system
  ownerReferences:
  - apiVersion: apps/v1
    blockOwnerDeletion: true
    controller: true
    kind: DaemonSet
    name: cilium
    uid: 395acb79-4176-4def-8d4e-553cd52988c6
  resourceVersion: "591"
  uid: 5de188fb-1554-4a8c-8404-7862d86576d3
spec:
  affinity:
    nodeAffinity:
      requiredDuringSchedulingIgnoredDuringExecution:
        nodeSelectorTerms:
        - matchFields:
          - key: metadata.name
            operator: In
            values:
            - cili-control
    podAntiAffinity:
      requiredDuringSchedulingIgnoredDuringExecution:
      - labelSelector:
          matchLabels:
            k8s-app: cilium
        topologyKey: kubernetes.io/hostname
  automountServiceAccountToken: true
  containers:
  - args:
    - --config-dir=/tmp/cilium/config-map
    command:
    - cilium-agent
    env:
    - name: K8S_NODE_NAME
      valueFrom:
        fieldRef:
          apiVersion: v1
          fieldPath: spec.nodeName
    - name: CILIUM_K8S_NAMESPACE
      valueFrom:
        fieldRef:
          apiVersion: v1
          fieldPath: metadata.namespace
    - name: CILIUM_CLUSTERMESH_CONFIG
      value: /var/lib/cilium/clustermesh/
    - name: GOMEMLIMIT
      valueFrom:
        resourceFieldRef:
          divisor: "1"
          resource: limits.memory
    image: registry.cn-beijing.aliyuncs.com/kubesphereio/cilium:v1.15.3
    imagePullPolicy: IfNotPresent
    lifecycle:
      postStart:
        exec:
          command:
          - bash
          - -c
          - |
            set -o errexit
            set -o pipefail
            set -o nounset

            # When running in AWS ENI mode, it's likely that 'aws-node' has
            # had a chance to install SNAT iptables rules. These can result
            # in dropped traffic, so we should attempt to remove them.
            # We do it using a 'postStart' hook since this may need to run
            # for nodes which might have already been init'ed but may still
            # have dangling rules. This is safe because there are no
            # dependencies on anything that is part of the startup script
            # itself, and can be safely run multiple times per node (e.g. in
            # case of a restart).
            if [[ "$(iptables-save | grep -E -c 'AWS-SNAT-CHAIN|AWS-CONNMARK-CHAIN')" != "0" ]];
            then
                echo 'Deleting iptables rules created by the AWS CNI VPC plugin'
                iptables-save | grep -E -v 'AWS-SNAT-CHAIN|AWS-CONNMARK-CHAIN' | iptables-restore
            fi
            echo 'Done!'
      preStop:
        exec:
          command:
          - /cni-uninstall.sh
    livenessProbe:
      failureThreshold: 10
      httpGet:
        host: 127.0.0.1
        httpHeaders:
        - name: brief
          value: "true"
        path: /healthz
        port: 9879
        scheme: HTTP
      periodSeconds: 30
      successThreshold: 1
      timeoutSeconds: 5
    name: cilium-agent
    readinessProbe:
      failureThreshold: 3
      httpGet:
        host: 127.0.0.1
        httpHeaders:
        - name: brief
          value: "true"
        path: /healthz
        port: 9879
        scheme: HTTP
      periodSeconds: 30
      successThreshold: 1
      timeoutSeconds: 5
    resources: {}
    securityContext:
      appArmorProfile:
        type: Unconfined
      capabilities:
        add:
        - CHOWN
        - KILL
        - NET_ADMIN
        - NET_RAW
        - IPC_LOCK
        - SYS_MODULE
        - SYS_ADMIN
        - SYS_RESOURCE
        - DAC_OVERRIDE
        - FOWNER
        - SETGID
        - SETUID
        drop:
        - ALL
      seLinuxOptions:
        level: s0
        type: spc_t
    startupProbe:
      failureThreshold: 105
      httpGet:
        host: 127.0.0.1
        httpHeaders:
        - name: brief
          value: "true"
        path: /healthz
        port: 9879
        scheme: HTTP
      initialDelaySeconds: 5
      periodSeconds: 2
      successThreshold: 1
      timeoutSeconds: 1
    terminationMessagePath: /dev/termination-log
    terminationMessagePolicy: FallbackToLogsOnError
    volumeMounts:
    - mountPath: /host/proc/sys/net
      name: host-proc-sys-net
    - mountPath: /host/proc/sys/kernel
      name: host-proc-sys-kernel
    - mountPath: /sys/fs/bpf
      mountPropagation: HostToContainer
      name: bpf-maps
    - mountPath: /var/run/cilium
      name: cilium-run
    - mountPath: /host/etc/cni/net.d
      name: etc-cni-netd
    - mountPath: /var/lib/cilium/clustermesh
      name: clustermesh-secrets
      readOnly: true
    - mountPath: /lib/modules
      name: lib-modules
      readOnly: true
    - mountPath: /run/xtables.lock
      name: xtables-lock
    - mountPath: /var/lib/cilium/tls/hubble
      name: hubble-tls
      readOnly: true
    - mountPath: /tmp
      name: tmp
    - mountPath: /var/run/secrets/kubernetes.io/serviceaccount
      name: kube-api-access-cs8l9
      readOnly: true
  dnsPolicy: ClusterFirst
  enableServiceLinks: true
  hostNetwork: true
  initContainers:
  - command:
    - cilium-dbg
    - build-config
    env:
    - name: K8S_NODE_NAME
      valueFrom:
        fieldRef:
          apiVersion: v1
          fieldPath: spec.nodeName
    - name: CILIUM_K8S_NAMESPACE
      valueFrom:
        fieldRef:
          apiVersion: v1
          fieldPath: metadata.namespace
    image: registry.cn-beijing.aliyuncs.com/kubesphereio/cilium:v1.15.3
    imagePullPolicy: IfNotPresent
    name: config
    resources: {}
    terminationMessagePath: /dev/termination-log
    terminationMessagePolicy: FallbackToLogsOnError
    volumeMounts:
    - mountPath: /tmp
      name: tmp
    - mountPath: /var/run/secrets/kubernetes.io/serviceaccount
      name: kube-api-access-cs8l9
      readOnly: true
  - command:
    - sh
    - -ec
    - |
      cp /usr/bin/cilium-mount /hostbin/cilium-mount;
      nsenter --cgroup=/hostproc/1/ns/cgroup --mount=/hostproc/1/ns/mnt "${BIN_PATH}/cilium-mount" $CGROUP_ROOT;
      rm /hostbin/cilium-mount
    env:
    - name: CGROUP_ROOT
      value: /run/cilium/cgroupv2
    - name: BIN_PATH
      value: /opt/cni/bin
    image: registry.cn-beijing.aliyuncs.com/kubesphereio/cilium:v1.15.3
    imagePullPolicy: IfNotPresent
    name: mount-cgroup
    resources: {}
    securityContext:
      appArmorProfile:
        type: Unconfined
      capabilities:
        add:
        - SYS_ADMIN
        - SYS_CHROOT
        - SYS_PTRACE
        drop:
        - ALL
      seLinuxOptions:
        level: s0
        type: spc_t
    terminationMessagePath: /dev/termination-log
    terminationMessagePolicy: FallbackToLogsOnError
    volumeMounts:
    - mountPath: /hostproc
      name: hostproc
    - mountPath: /hostbin
      name: cni-path
    - mountPath: /var/run/secrets/kubernetes.io/serviceaccount
      name: kube-api-access-cs8l9
      readOnly: true
  - command:
    - sh
    - -ec
    - |
      cp /usr/bin/cilium-sysctlfix /hostbin/cilium-sysctlfix;
      nsenter --mount=/hostproc/1/ns/mnt "${BIN_PATH}/cilium-sysctlfix";
      rm /hostbin/cilium-sysctlfix
    env:
    - name: BIN_PATH
      value: /opt/cni/bin
    image: registry.cn-beijing.aliyuncs.com/kubesphereio/cilium:v1.15.3
    imagePullPolicy: IfNotPresent
    name: apply-sysctl-overwrites
    resources: {}
    securityContext:
      appArmorProfile:
        type: Unconfined
      capabilities:
        add:
        - SYS_ADMIN
        - SYS_CHROOT
        - SYS_PTRACE
        drop:
        - ALL
      seLinuxOptions:
        level: s0
        type: spc_t
    terminationMessagePath: /dev/termination-log
    terminationMessagePolicy: FallbackToLogsOnError
    volumeMounts:
    - mountPath: /hostproc
      name: hostproc
    - mountPath: /hostbin
      name: cni-path
    - mountPath: /var/run/secrets/kubernetes.io/serviceaccount
      name: kube-api-access-cs8l9
      readOnly: true
  - args:
    - mount | grep "/sys/fs/bpf type bpf" || mount -t bpf bpf /sys/fs/bpf
    command:
    - /bin/bash
    - -c
    - --
    image: registry.cn-beijing.aliyuncs.com/kubesphereio/cilium:v1.15.3
    imagePullPolicy: IfNotPresent
    name: mount-bpf-fs
    resources: {}
    securityContext:
      privileged: true
    terminationMessagePath: /dev/termination-log
    terminationMessagePolicy: FallbackToLogsOnError
    volumeMounts:
    - mountPath: /sys/fs/bpf
      mountPropagation: Bidirectional
      name: bpf-maps
    - mountPath: /var/run/secrets/kubernetes.io/serviceaccount
      name: kube-api-access-cs8l9
      readOnly: true
  - command:
    - /init-container.sh
    env:
    - name: CILIUM_ALL_STATE
      valueFrom:
        configMapKeyRef:
          key: clean-cilium-state
          name: cilium-config
          optional: true
    - name: CILIUM_BPF_STATE
      valueFrom:
        configMapKeyRef:
          key: clean-cilium-bpf-state
          name: cilium-config
          optional: true
    - name: WRITE_CNI_CONF_WHEN_READY
      valueFrom:
        configMapKeyRef:
          key: write-cni-conf-when-ready
          name: cilium-config
          optional: true
    image: registry.cn-beijing.aliyuncs.com/kubesphereio/cilium:v1.15.3
    imagePullPolicy: IfNotPresent
    name: clean-cilium-state
    resources: {}
    securityContext:
      appArmorProfile:
        type: Unconfined
      capabilities:
        add:
        - NET_ADMIN
        - SYS_MODULE
        - SYS_ADMIN
        - SYS_RESOURCE
        drop:
        - ALL
      seLinuxOptions:
        level: s0
        type: spc_t
    terminationMessagePath: /dev/termination-log
    terminationMessagePolicy: FallbackToLogsOnError
    volumeMounts:
    - mountPath: /sys/fs/bpf
      name: bpf-maps
    - mountPath: /run/cilium/cgroupv2
      mountPropagation: HostToContainer
      name: cilium-cgroup
    - mountPath: /var/run/cilium
      name: cilium-run
    - mountPath: /var/run/secrets/kubernetes.io/serviceaccount
      name: kube-api-access-cs8l9
      readOnly: true
  - command:
    - /install-plugin.sh
    image: registry.cn-beijing.aliyuncs.com/kubesphereio/cilium:v1.15.3
    imagePullPolicy: IfNotPresent
    name: install-cni-binaries
    resources:
      requests:
        cpu: 100m
        memory: 10Mi
    securityContext:
      capabilities:
        drop:
        - ALL
      seLinuxOptions:
        level: s0
        type: spc_t
    terminationMessagePath: /dev/termination-log
    terminationMessagePolicy: FallbackToLogsOnError
    volumeMounts:
    - mountPath: /host/opt/cni/bin
      name: cni-path
    - mountPath: /var/run/secrets/kubernetes.io/serviceaccount
      name: kube-api-access-cs8l9
      readOnly: true
  nodeName: cili-control
  nodeSelector:
    kubernetes.io/os: linux
  preemptionPolicy: PreemptLowerPriority
  priority: 2000001000
  priorityClassName: system-node-critical
  restartPolicy: Always
  schedulerName: default-scheduler
  securityContext: {}
  serviceAccount: cilium
  serviceAccountName: cilium
  terminationGracePeriodSeconds: 1
  tolerations:
  - operator: Exists
  - effect: NoExecute
    key: node.kubernetes.io/not-ready
    operator: Exists
  - effect: NoExecute
    key: node.kubernetes.io/unreachable
    operator: Exists
  - effect: NoSchedule
    key: node.kubernetes.io/disk-pressure
    operator: Exists
  - effect: NoSchedule
    key: node.kubernetes.io/memory-pressure
    operator: Exists
  - effect: NoSchedule
    key: node.kubernetes.io/pid-pressure
    operator: Exists
  - effect: NoSchedule
    key: node.kubernetes.io/unschedulable
    operator: Exists
  - effect: NoSchedule
    key: node.kubernetes.io/network-unavailable
    operator: Exists
  volumes:
  - emptyDir: {}
    name: tmp
  - hostPath:
      path: /var/run/cilium
      type: DirectoryOrCreate
    name: cilium-run
  - hostPath:
      path: /sys/fs/bpf
      type: DirectoryOrCreate
    name: bpf-maps
  - hostPath:
      path: /proc
      type: Directory
    name: hostproc
  - hostPath:
      path: /run/cilium/cgroupv2
      type: DirectoryOrCreate
    name: cilium-cgroup
  - hostPath:
      path: /opt/cni/bin
      type: DirectoryOrCreate
    name: cni-path
  - hostPath:
      path: /etc/cni/net.d
      type: DirectoryOrCreate
    name: etc-cni-netd
  - hostPath:
      path: /lib/modules
      type: ""
    name: lib-modules
  - hostPath:
      path: /run/xtables.lock
      type: FileOrCreate
    name: xtables-lock
  - name: clustermesh-secrets
    projected:
      defaultMode: 256
      sources:
      - secret:
          name: cilium-clustermesh
          optional: true
      - secret:
          items:
          - key: tls.key
            path: common-etcd-client.key
          - key: tls.crt
            path: common-etcd-client.crt
          - key: ca.crt
            path: common-etcd-client-ca.crt
          name: clustermesh-apiserver-remote-cert
          optional: true
  - hostPath:
      path: /proc/sys/net
      type: Directory
    name: host-proc-sys-net
  - hostPath:
      path: /proc/sys/kernel
      type: Directory
    name: host-proc-sys-kernel
  - name: hubble-tls
    projected:
      defaultMode: 256
      sources:
      - secret:
          items:
          - key: tls.crt
            path: server.crt
          - key: tls.key
            path: server.key
          - key: ca.crt
            path: client-ca.crt
          name: hubble-server-certs
          optional: true
  - name: kube-api-access-cs8l9
    projected:
      defaultMode: 420
      sources:
      - serviceAccountToken:
          expirationSeconds: 3607
          path: token
      - configMap:
          items:
          - key: ca.crt
            path: ca.crt
          name: kube-root-ca.crt
      - downwardAPI:
          items:
          - fieldRef:
              apiVersion: v1
              fieldPath: metadata.namespace
            path: namespace

operator 控制器


root@cili-control:~# k get po -n kube-system   cilium-operator-7c6b45754-jbqbw -o yaml
apiVersion: v1
kind: Pod
metadata:
  annotations:
    prometheus.io/port: "9963"
    prometheus.io/scrape: "true"
  creationTimestamp: "2025-10-19T06:43:37Z"
  generateName: cilium-operator-7c6b45754-
  generation: 1
  labels:
    app.kubernetes.io/name: cilium-operator
    app.kubernetes.io/part-of: cilium
    io.cilium/app: operator
    name: cilium-operator
    pod-template-hash: 7c6b45754
  name: cilium-operator-7c6b45754-jbqbw
  namespace: kube-system
  ownerReferences:
  - apiVersion: apps/v1
    blockOwnerDeletion: true
    controller: true
    kind: ReplicaSet
    name: cilium-operator-7c6b45754
    uid: 163b247c-e515-4920-92e7-5b43c750dc75
  resourceVersion: "532"
  uid: 559c7ce6-7563-4760-841d-0c5dec6fedee
spec:
  affinity:
    podAntiAffinity:
      requiredDuringSchedulingIgnoredDuringExecution:
      - labelSelector:
          matchLabels:
            io.cilium/app: operator
        topologyKey: kubernetes.io/hostname
  automountServiceAccountToken: true
  containers:
  - args:
    - --config-dir=/tmp/cilium/config-map
    - --debug=$(CILIUM_DEBUG)
    command:
    - cilium-operator-generic
    env:
    - name: K8S_NODE_NAME
      valueFrom:
        fieldRef:
          apiVersion: v1
          fieldPath: spec.nodeName
    - name: CILIUM_K8S_NAMESPACE
      valueFrom:
        fieldRef:
          apiVersion: v1
          fieldPath: metadata.namespace
    - name: CILIUM_DEBUG
      valueFrom:
        configMapKeyRef:
          key: debug
          name: cilium-config
          optional: true
    image: registry.cn-beijing.aliyuncs.com/kubesphereio/operator-generic:v1.15.3
    imagePullPolicy: IfNotPresent
    livenessProbe:
      failureThreshold: 3
      httpGet:
        host: 127.0.0.1
        path: /healthz
        port: 9234
        scheme: HTTP
      initialDelaySeconds: 60
      periodSeconds: 10
      successThreshold: 1
      timeoutSeconds: 3
    name: cilium-operator
    ports:
    - containerPort: 9963
      hostPort: 9963
      name: prometheus
      protocol: TCP
    readinessProbe:
      failureThreshold: 5
      httpGet:
        host: 127.0.0.1
        path: /healthz
        port: 9234
        scheme: HTTP
      periodSeconds: 5
      successThreshold: 1
      timeoutSeconds: 3
    resources: {}
    terminationMessagePath: /dev/termination-log
    terminationMessagePolicy: FallbackToLogsOnError
    volumeMounts:
    - mountPath: /tmp/cilium/config-map
      name: cilium-config-path
      readOnly: true
    - mountPath: /var/run/secrets/kubernetes.io/serviceaccount
      name: kube-api-access-n8vw7
      readOnly: true
  dnsPolicy: ClusterFirst
  enableServiceLinks: true
  hostNetwork: true
  nodeName: cili-control
  nodeSelector:
    kubernetes.io/os: linux
  preemptionPolicy: PreemptLowerPriority
  priority: 2000000000
  priorityClassName: system-cluster-critical
  restartPolicy: Always
  schedulerName: default-scheduler
  securityContext: {}
  serviceAccount: cilium-operator
  serviceAccountName: cilium-operator
  terminationGracePeriodSeconds: 30
  tolerations:
  - operator: Exists
  volumes:
  - configMap:
      defaultMode: 420
      name: cilium-config
    name: cilium-config-path
  - name: kube-api-access-n8vw7
    projected:
      defaultMode: 420
      sources:
      - serviceAccountToken:
          expirationSeconds: 3607
          path: token
      - configMap:
          items:
          - key: ca.crt
            path: ca.crt
          name: kube-root-ca.crt
      - downwardAPI:
          items:
          - fieldRef:
              apiVersion: v1
              fieldPath: metadata.namespace
            path: namespace


cilium-config（agent/operator 共用的 ConfigMap 配置）


root@cili-control:~# k get cm -n kube-system cilium-config -o yaml
apiVersion: v1
data:
  agent-not-ready-taint-key: node.cilium.io/agent-not-ready
  arping-refresh-period: 30s
  auto-direct-node-routes: "false" # 可以试用下
  bpf-lb-acceleration: disabled # 需要用
  bpf-lb-external-clusterip: "false" # 可以试用下
  bpf-lb-map-max: "65536" 
  bpf-lb-sock: "false" # 可以试用下
  bpf-map-dynamic-size-ratio: "0.0025"
  bpf-policy-map-max: "16384"
  bpf-root: /sys/fs/bpf
  cgroup-root: /run/cilium/cgroupv2
  cilium-endpoint-gc-interval: 5m0s
  cluster-id: "0"
  cluster-name: default
  cluster-pool-ipv4-cidr: 10.233.64.0/18
  cluster-pool-ipv4-mask-size: "24"
  cni-exclusive: "true" 
  cni-log-file: /var/run/cilium/cilium-cni.log
  custom-cni-conf: "false" # 可以试用下
  debug: "false"
  debug-verbose: ""
  dnsproxy-enable-transparent-mode: "true"
  egress-gateway-reconciliation-trigger-interval: 1s
  enable-auto-protect-node-port-range: "true"
  enable-bgp-control-plane: "false"  # 需要用
  enable-bpf-clock-probe: "false" # 需要用
  enable-endpoint-health-checking: "true"
  enable-external-ips: "false" # 需要用
  enable-health-check-loadbalancer-ip: "false"
  enable-health-check-nodeport: "true"
  enable-health-checking: "true"
  enable-host-port: "false" # 可以试用下
  enable-hubble: "true"
  enable-ipv4: "true"
  enable-ipv4-big-tcp: "false" # 可以试用下
  enable-ipv4-masquerade: "true"
  enable-ipv6: "false" # 需要用
  enable-ipv6-big-tcp: "false" # 需要用
  enable-ipv6-masquerade: "true" # 可以试用下
  enable-k8s-networkpolicy: "true"
  enable-k8s-terminating-endpoint: "true"
  enable-l2-neigh-discovery: "true"
  enable-l7-proxy: "true" # 需要用
  enable-local-redirect-policy: "false"
  enable-masquerade-to-route-source: "false"
  enable-metrics: "true"
  enable-node-port: "false"
  enable-policy: default
  enable-remote-node-identity: "true"
  enable-sctp: "false" # 可以试用下
  enable-svc-source-range-check: "true"
  enable-vtep: "false" # 可以试用下
  enable-well-known-identities: "false"
  enable-xt-socket-fallback: "true"
  external-envoy-proxy: "false" # 可以试用下
  hubble-disable-tls: "false" 
  hubble-export-file-max-backups: "5"
  hubble-export-file-max-size-mb: "10"
  hubble-listen-address: :4244
  hubble-socket-path: /var/run/cilium/hubble.sock
  hubble-tls-cert-file: /var/lib/cilium/tls/hubble/server.crt
  hubble-tls-client-ca-files: /var/lib/cilium/tls/hubble/client-ca.crt
  hubble-tls-key-file: /var/lib/cilium/tls/hubble/server.key
  identity-allocation-mode: crd
  identity-gc-interval: 15m0s
  identity-heartbeat-timeout: 30m0s
  install-no-conntrack-iptables-rules: "false"
  ipam: cluster-pool 
  ipam-cilium-node-update-rate: 15s
  k8s-client-burst: "20"
  k8s-client-qps: "10" # 可以试用下
  kube-proxy-replacement: "false" # 需要用
  kube-proxy-replacement-healthz-bind-address: ""
  max-connected-clusters: "255"
  mesh-auth-enabled: "true"
  mesh-auth-gc-interval: 5m0s
  mesh-auth-queue-size: "1024"
  mesh-auth-rotated-identities-queue-size: "1024"
  monitor-aggregation: medium
  monitor-aggregation-flags: all
  monitor-aggregation-interval: 5s
  node-port-bind-protection: "true"
  nodes-gc-interval: 5m0s
  operator-api-serve-addr: 127.0.0.1:9234
  operator-prometheus-serve-addr: :9963
  policy-cidr-match-mode: ""
  preallocate-bpf-maps: "false"
  procfs: /host/proc
  proxy-connect-timeout: "2"
  proxy-max-connection-duration-seconds: "0"
  proxy-max-requests-per-connection: "0"
  proxy-prometheus-port: "9964"
  remove-cilium-node-taints: "true"
  routing-mode: tunnel
  service-no-backend-response: reject # 需要用
  set-cilium-is-up-condition: "true"
  set-cilium-node-taints: "true"
  sidecar-istio-proxy-image: cilium/istio_proxy
  skip-cnp-status-startup-clean: "false"
  synchronize-k8s-nodes: "true"
  tofqdns-dns-reject-response-code: refused
  tofqdns-enable-dns-compression: "true"
  tofqdns-endpoint-max-ip-per-hostname: "50"
  tofqdns-idle-connection-grace-period: 0s
  tofqdns-max-deferred-connection-deletes: "10000"
  tofqdns-proxy-response-max-delay: 100ms
  tunnel-protocol: vxlan # 默认模式
  unmanaged-pod-watcher-interval: "15"
  vtep-cidr: ""
  vtep-endpoint: ""
  vtep-mac: ""
  vtep-mask: ""
  write-cni-conf-when-ready: /host/etc/cni/net.d/05-cilium.conflist
kind: ConfigMap
metadata:
  annotations:
    meta.helm.sh/release-name: cilium
    meta.helm.sh/release-namespace: kube-system
  creationTimestamp: "2025-10-19T06:43:34Z"
  labels:
    app.kubernetes.io/managed-by: Helm
  name: cilium-config
  namespace: kube-system
  resourceVersion: "314"
  uid: 11016253-a373-457b-805e-ece9c38b7be7


1