K8s源码阅读-kubelet-获取pod变更

阅读时长约 2 分钟

Pod资源来源

  • API Server
    K8s集群使用者创建的Pod
  • File
    K8s静态Pod,默认在每个节点/etc/kubernetes/manifests路径
  • URL
    远程 HTTP Server 存放的 Pod 资源

流程图

![kubelet 获取 Pod 变更流程图](image.png)

核心代码分析

从 kubelet 启动主函数入手。启动过程中存在大量初始化操作,这里先排除与本主题无关的干扰项,聚焦于 Pod 变更的处理逻辑。

syncLoopIteration 是汇总处理 Pod 变更的逻辑入口:
// syncLoopIteration reads from various channels and dispatches pods to the
// given handler.
//
// Arguments:
// 1. configCh: a channel to read config events from
// 2. handler: the SyncHandler to dispatch pods to
// 3. syncCh: a channel to read periodic sync events from
// 4. housekeepingCh: a channel to read housekeeping events from
// 5. plegCh: a channel to read PLEG updates from
//
// Events are also read from the kubelet liveness manager's update channel.
//
// The workflow is to read from one of the channels, handle that event, and
// update the timestamp in the sync loop monitor.
//
// Here is an appropriate place to note that despite the syntactical
// similarity to the switch statement, the case statements in a select are
// evaluated in a pseudorandom order if there are multiple channels ready to
// read from when the select is evaluated. In other words, case statements
// are evaluated in random order, and you can not assume that the case
// statements evaluate in order if multiple channels have events.
//
// With that in mind, in truly no particular order, the different channels
// are handled as follows:
//
// - configCh: dispatch the pods for the config change to the appropriate
// handler callback for the event type
// - plegCh: update the runtime cache; sync pod
// - syncCh: sync all pods waiting for sync
// - housekeepingCh: trigger cleanup of pods
// - health manager: sync pods that have failed or in which one or more
// containers have failed health checks
//
// Handles all pod change operations (ADD, UPDATE, REMOVE, DELETE, RECONCILE).
func (kl *Kubelet) syncLoopIteration(ctx context.Context, configCh <-chan kubetypes.PodUpdate, handler SyncHandler,
syncCh <-chan time.Time, housekeepingCh <-chan time.Time, plegCh <-chan *pleg.PodLifecycleEvent) bool {
select {
    // Pod change pushed by one of the config sources (API Server, file, URL).
case u, open := <-configCh:
    // Update from a config source; dispatch it to the right handler
    // callback.
    if !open {
    klog.ErrorS(nil, "Update channel is closed, exiting the sync loop")
        return false
    }
switch u.Op {
case kubetypes.ADD:
    klog.V(2).InfoS("SyncLoop ADD", "source", u.Source, "pods", klog.KObjSlice(u.Pods))
    // After restarting, kubelet will get all existing pods through
    // ADD as if they are new pods. These pods will then go through the
    // admission process and *may* be rejected. This can be resolved
    // once we have checkpointing.
    handler.HandlePodAdditions(u.Pods)
case kubetypes.UPDATE:
    klog.V(2).InfoS("SyncLoop UPDATE", "source", u.Source, "pods", klog.KObjSlice(u.Pods))
    handler.HandlePodUpdates(u.Pods)
case kubetypes.REMOVE:
    klog.V(2).InfoS("SyncLoop REMOVE", "source", u.Source, "pods", klog.KObjSlice(u.Pods))
     handler.HandlePodRemoves(u.Pods)
case kubetypes.RECONCILE:
     klog.V(4).InfoS("SyncLoop RECONCILE", "source", u.Source, "pods",klog.KObjSlice(u.Pods))
    handler.HandlePodReconcile(u.Pods)
case kubetypes.DELETE:
    klog.V(2).InfoS("SyncLoop DELETE", "source", u.Source, "pods", klog.KObjSlice(u.Pods))
    // DELETE is treated as a UPDATE because of graceful deletion.
    handler.HandlePodUpdates(u.Pods)
case kubetypes.SET:
    // TODO: Do we want to support this?
    klog.ErrorS(nil, "Kubelet does not support snapshot update")
default:
    klog.ErrorS(nil, "Invalid operation type received", "operation", u.Op)
}
// NOTE(excerpt): the remaining select cases — plegCh, syncCh,
// housekeepingCh, and the health/liveness manager channel listed in the
// doc comment above — are elided from this article's excerpt.
...
...
...

return true
}