osm 源码简单分析

343 阅读2分钟

简介

作为 xds 的 server,连接上来的 envoy 都会注册到 meshcatalog,断开则取消注册

每一分钟或者 meshspec/cert/ingress/namespace/endpoint/deployment 有变化,则下发配置到 meshcatalog 中已注册的 envoy

当然也有 envoy 主动获取的

源码

cmd/osm-controller/osm-controller.go 中

// main wires up the osm-controller: it builds the Kubernetes client, the
// namespace / SMI / ingress / endpoint watchers, aggregates them into the
// mesh catalog, and serves the catalog over gRPC as an xDS (ADS) server.
// NOTE(review): excerpt — "..." marks lines elided by the article.
func main() {
    ...
    kubeClient := kubernetes.NewForConfigOrDie(kubeConfig)
    ...
	// Watches namespaces labeled as belonging to this mesh.
	namespaceController := namespace.NewNamespaceController(kubeClient, meshName, stop)
	// Watches SMI resources (TrafficSplit / TrafficSpec / TrafficTarget).
	meshSpec, err := smi.NewMeshSpecClient(*smiKubeConfig, kubeClient, osmNamespace, namespaceController, stop)
    ...
	// Kubernetes endpoints/deployments provider.
	provider, err := kube.NewProvider(kubeClient, namespaceController, stop, constants.KubeProviderName, cfg)
    ...
	endpointsProviders := []endpoint.Provider{provider}
    ...
	// Watches Ingress resources.
	ingressClient, err := ingress.NewIngressClient(kubeClient, namespaceController, stop, cfg)
    ...
	// The catalog aggregates every watcher; connected Envoys register here
	// and receive config pushes when any source announces a change.
    meshCatalog := catalog.NewMeshCatalog(
		namespaceController,
		kubeClient,
		meshSpec,
		certManager,
		ingressClient,
		stop,
		cfg,
		endpointsProviders...)
    ...
	// ADS server implementing the aggregated xDS discovery service.
	xdsServer := ads.NewADSServer(ctx, meshCatalog, enableDebugServer, osmNamespace, cfg)
    ...
	// gRPC server secured with the ADS certificate chain / key / issuing CA.
	grpcServer, lis := utils.NewGrpc(serverType, *port, adsCert.GetCertificateChain(), adsCert.GetPrivateKey(), adsCert.GetIssuingCA())
	xds_discovery.RegisterAggregatedDiscoveryServiceServer(grpcServer, xdsServer)

	go utils.GrpcServe(ctx, grpcServer, lis, cancel, serverType)
}

pkg/utils/grpc.go 中

创建grpc server
// NewGrpc creates a gRPC server bound to the given port, configured (in the
// elided lines) from the supplied PEM-encoded certificate, key, and root CA.
// NOTE(review): excerpt — "..." marks lines elided by the article.
func NewGrpc(serverType string, port int, certPem, keyPem, rootCertPem []byte) (*grpc.Server, net.Listener) {
	...
	return grpc.NewServer(grpcOptions...), lis
}

pkg/envoy/ads/server.go 中

创建ads server
// NewADSServer constructs the Aggregated Discovery Service server backed by
// the given mesh catalog. When debug is enabled, it also allocates a
// per-proxy (by certificate CN), per-xDS-type log of response timestamps.
func NewADSServer(ctx context.Context, meshCatalog catalog.MeshCataloger, enableDebug bool, osmNamespace string, cfg configurator.Configurator) *Server {
	s := &Server{
		catalog:      meshCatalog,
		ctx:          ctx,
		xdsHandlers:  getHandlers(),
		enableDebug:  enableDebug,
		osmNamespace: osmNamespace,
		cfg:          cfg,
	}

	if enableDebug {
		// Records when each xDS response type was sent to each proxy.
		s.xdsLog = make(map[certificate.CommonName]map[envoy.TypeURI][]time.Time)
	}

	return s
}

// getHandlers maps each supported xDS type URI (EDS, CDS, RDS, LDS, SDS) to
// the function that builds its DiscoveryResponse.
func getHandlers() map[envoy.TypeURI]func(context.Context, catalog.MeshCataloger, *envoy.Proxy, *xds_discovery.DiscoveryRequest, configurator.Configurator) (*xds_discovery.DiscoveryResponse, error) {
	handlers := make(map[envoy.TypeURI]func(context.Context, catalog.MeshCataloger, *envoy.Proxy, *xds_discovery.DiscoveryRequest, configurator.Configurator) (*xds_discovery.DiscoveryResponse, error), 5)
	handlers[envoy.TypeEDS] = eds.NewResponse
	handlers[envoy.TypeCDS] = cds.NewResponse
	handlers[envoy.TypeRDS] = rds.NewResponse
	handlers[envoy.TypeLDS] = lds.NewResponse
	handlers[envoy.TypeSDS] = sds.NewResponse
	return handlers
}

pkg/envoy/ads/stream.go 中

envoy 主动向 xds server 请求配置,或在配置有变化后由 server 推送
// StreamAggregatedResources is the long-lived bidirectional gRPC stream each
// Envoy opens. Requests from the proxy are answered on demand; independently,
// any message on the proxy's announcements channel triggers a push of all
// responses. NOTE(review): excerpt — "..." marks lines elided by the article.
func (s *Server) StreamAggregatedResources(server xds_discovery.AggregatedDiscoveryService_StreamAggregatedResourcesServer) error {
    ...
	// Pump inbound DiscoveryRequests from the stream into `requests`.
	go receive(requests, &server, proxy, quit)

	for {
        select {
            ...
		    case discoveryRequest, ok := <-requests:
                ...
			    // Proxy-initiated request: build the response for its type URI.
			    resp, err := s.newAggregatedDiscoveryResponse(proxy, &discoveryRequest, s.cfg)
                ...

			    if err := server.Send(resp); err != nil {
				    log.Error().Err(err).Msgf("Error sending DiscoveryResponse")
			    }

		    case <-proxy.GetAnnouncementsChannel():
			    // Config change announced for this proxy: push all responses.
			    s.sendAllResponses(proxy, &server, s.cfg)
		}
	}
}

pkg/envoy/ads/grpc.go 中

接收 envoy 请求
// receive pumps DiscoveryRequests off the gRPC stream into the requests
// channel until Recv fails (elided error handling presumably breaks the
// loop); both channels are closed on exit so the caller's select loop can
// observe disconnection. NOTE(review): excerpt — "..." marks elided lines.
func receive(requests chan xds_discovery.DiscoveryRequest, server *xds_discovery.AggregatedDiscoveryService_StreamAggregatedResourcesServer, proxy *envoy.Proxy, quit chan struct{}) {
	defer close(requests)
	defer close(quit)
	for {
		var request *xds_discovery.DiscoveryRequest
		request, recvErr := (*server).Recv()
		...
		requests <- *request
	}
}

pkg/envoy/ads/response.go 中

获取xds处理函数
// newAggregatedDiscoveryResponse dispatches a DiscoveryRequest to the handler
// registered for its type URI (EDS/CDS/RDS/LDS/SDS) and returns the built
// response. NOTE(review): excerpt — "..." marks lines elided by the article.
func (s *Server) newAggregatedDiscoveryResponse(proxy *envoy.Proxy, request *xds_discovery.DiscoveryRequest, cfg configurator.Configurator) (*xds_discovery.DiscoveryResponse, error) {
	typeURL := envoy.TypeURI(request.TypeUrl)
	// Look up the per-type handler registered in getHandlers().
	handler, ok := s.xdsHandlers[typeURL]
    ...
	response, err := handler(s.ctx, s.catalog, proxy, request, cfg)
    ...
	return response, nil
}

pkg/catalog/routes.go 中

获取路由
// ListTrafficPolicies returns the traffic policies that apply to the given
// mesh service: allow-all policies in permissive mode, otherwise policies
// derived from the SMI HTTP routes.
func (mc *MeshCatalog) ListTrafficPolicies(service service.MeshService) ([]trafficpolicy.TrafficTarget, error) {
	// Permissive mode: every service is allowed to talk to every other.
	if mc.configurator.IsPermissiveTrafficPolicyMode() {
		trafficPolicies, err := mc.buildAllowAllTrafficPolicies(service)
		if err != nil {
			return nil, err
		}
		return trafficPolicies, nil
	}

	// SMI mode: collect all HTTP routes, then derive per-route policies.
	allRoutes, err := mc.getHTTPPathsPerRoute()
	if err != nil {
		return nil, err
	}

	allTrafficPolicies, err := getTrafficPolicyPerRoute(mc, allRoutes, service)
	if err != nil {
		return nil, err
	}
	return allTrafficPolicies, nil
}

pkg/smi/client.go 中

监控meshspec变化
// NewMeshSpecClient builds an SMI client that watches TrafficSplit,
// TrafficSpec and TrafficTarget resources (plus Backpressure when that
// feature flag is enabled), starts its informers via run, and blocks until
// their caches have synced.
func NewMeshSpecClient(smiKubeConfig *rest.Config, kubeClient kubernetes.Interface, osmNamespace string, namespaceController namespace.Controller, stop chan struct{}) (MeshSpec, error) {
	// Backpressure support is optional; its clientset stays nil unless the
	// feature flag is on.
	var backpressureClientSet *backpressureClient.Clientset
	if featureflags.IsBackpressureEnabled() {
		backpressureClientSet = backpressureClient.NewForConfigOrDie(smiKubeConfig)
	}

	smiClient := newSMIClient(
		kubeClient,
		smiTrafficSplitClient.NewForConfigOrDie(smiKubeConfig),
		smiTrafficSpecClient.NewForConfigOrDie(smiKubeConfig),
		smiTrafficTargetClient.NewForConfigOrDie(smiKubeConfig),
		backpressureClientSet,
		osmNamespace,
		namespaceController,
		kubernetesClientName,
	)

	if err := smiClient.run(stop); err != nil {
		return smiClient, errors.Errorf("Could not start %s client", kubernetesClientName)
	}
	return smiClient, nil
}

// run starts all of the SMI client's shared informers and blocks until their
// caches have synced; on success it closes cacheSynced to signal readiness to
// any waiters. Returns errInitInformers when the informer collection was
// never initialized, or errSyncingCaches when the sync fails/stops.
//
// Fix vs. quoted original: the `names` slice was accumulated but never read
// (dead local, likely a leftover of an elided log statement) — removed.
func (c *Client) run(stop <-chan struct{}) error {
	log.Info().Msg("SMI Client started")

	if c.informers == nil {
		return errInitInformers
	}

	sharedInformers := map[string]cache.SharedInformer{
		"TrafficSplit":  c.informers.TrafficSplit,
		"Services":      c.informers.Services,
		"TrafficSpec":   c.informers.TrafficSpec,
		"TrafficTarget": c.informers.TrafficTarget,
	}

	if featureflags.IsBackpressureEnabled() {
		sharedInformers["Backpressure"] = c.informers.Backpressure
	}

	// Start every non-nil informer and collect its HasSynced probe.
	var hasSynced []cache.InformerSynced
	for _, informer := range sharedInformers {
		if informer == nil {
			continue
		}
		go informer.Run(stop)
		hasSynced = append(hasSynced, informer.HasSynced)
	}

	if !cache.WaitForCacheSync(stop, hasSynced...) {
		return errSyncingCaches
	}

	// Caches are warm; unblock anyone waiting on cacheSynced.
	close(c.cacheSynced)

	return nil
}

pkg/catalog/catalog.go 中

初始化meshCatalog
// NewMeshCatalog assembles the MeshCatalog from its dependencies (SMI mesh
// spec, certificate manager, ingress monitor, endpoint providers, ...),
// subscribes to every announcement channel, and starts the repeater
// goroutine that fans change notifications out to connected proxies.
func NewMeshCatalog(namespaceController namespace.Controller, kubeClient kubernetes.Interface, meshSpec smi.MeshSpec, certManager certificate.Manager, ingressMonitor ingress.Monitor, stop <-chan struct{}, cfg configurator.Configurator, endpointsProviders ...endpoint.Provider) *MeshCatalog {
	mc := &MeshCatalog{
		endpointsProviders:   endpointsProviders,
		meshSpec:             meshSpec,
		certManager:          certManager,
		ingressMonitor:       ingressMonitor,
		configurator:         cfg,
		expectedProxies:      make(map[certificate.CommonName]expectedProxy),
		connectedProxies:     make(map[certificate.CommonName]connectedProxy),
		disconnectedProxies:  make(map[certificate.CommonName]disconnectedProxy),
		announcementChannels: set.NewSet(),
		kubeClient:           kubeClient,
		namespaceController:  namespaceController,
	}

	// Register every source of change notifications: mesh spec, certs,
	// ingress, namespaces, the periodic ticker, and each endpoint provider.
	for _, ch := range mc.getAnnouncementChannels() {
		mc.announcementChannels.Add(ch)
	}

	// Fan announcements out to all connected Envoy proxies.
	go mc.repeater()
	return mc
}

// getAnnouncementChannels returns one announcementChannel per source of
// change: the SMI mesh spec, the certificate manager, the ingress monitor,
// the namespace controller, each endpoint provider, and a periodic ticker so
// an update fires at least every updateAtLeastEvery.
//
// Fix vs. quoted original: the goroutine sent the ticker's channel itself
// (ticker.C, a single value) into `ticking`, so the "Ticker" source announced
// exactly once and then went silent, and the ticker was never consumed. The
// goroutine now forwards every tick, producing the intended periodic signal.
func (mc *MeshCatalog) getAnnouncementChannels() []announcementChannel {
	ticking := make(chan interface{})
	announcementChannels := []announcementChannel{
		{"MeshSpec", mc.meshSpec.GetAnnouncementsChannel()},
		{"CertManager", mc.certManager.GetAnnouncementsChannel()},
		{"IngressMonitor", mc.ingressMonitor.GetAnnouncementsChannel()},
		{"Ticker", ticking},
		{"Namespace", mc.namespaceController.GetAnnouncementsChannel()},
	}
	for _, ep := range mc.endpointsProviders {
		annCh := announcementChannel{ep.GetID(), ep.GetAnnouncementsChannel()}
		announcementChannels = append(announcementChannels, annCh)
	}

	go func() {
		ticker := time.NewTicker(updateAtLeastEvery)
		defer ticker.Stop()
		// Forward each tick as an announcement on the "Ticker" channel.
		for t := range ticker.C {
			ticking <- t
		}
	}()
	return announcementChannels
}

pkg/catalog/repeater.go 中

监听所有变化事件,并通知到所有已连接的 envoy proxy,然后下发配置
// repeater listens on every announcement channel (via a dynamic select) and
// fans each message out to all connected Envoy proxies, rate-limited to at
// most one broadcast per updateAtMostEvery.
//
// Fix vs. quoted original: `caseNames` and `chosenIdx` were assigned but
// never used, which does not compile in Go; both are discarded with blank
// identifiers here.
func (mc *MeshCatalog) repeater() {
	// Start "in the past" so the very first announcement broadcasts immediately.
	lastUpdateAt := time.Now().Add(-1 * updateAtMostEvery)
	for {
		// NOTE(review): the inner loop never breaks, so the select cases are
		// built only once and newly registered announcement channels would not
		// be picked up; presumably the elided upstream code breaks out to
		// refresh them — confirm against the original source.
		cases, _ := mc.getCases()
		for {
			if _, message, ok := reflect.Select(cases); ok {
				// Rate-limit: drop messages arriving too soon after the last push.
				delta := time.Since(lastUpdateAt)
				if delta >= updateAtMostEvery {
					// NOTE(review): message is a reflect.Value; receivers appear
					// to treat announcements as opaque signals, so it is
					// forwarded as-is.
					mc.broadcast(message)
					lastUpdateAt = time.Now()
				}
			}
		}
	}
}

// getCases converts every registered announcement channel into a
// reflect.SelectCase for a dynamic select, and returns, in parallel order,
// the name of the announcer behind each case.
func (mc *MeshCatalog) getCases() ([]reflect.SelectCase, []string) {
	var (
		cases     []reflect.SelectCase
		caseNames []string
	)
	for _, item := range mc.announcementChannels.ToSlice() {
		ac := item.(announcementChannel)
		cases = append(cases, reflect.SelectCase{
			Dir:  reflect.SelectRecv,
			Chan: reflect.ValueOf(ac.channel),
		})
		caseNames = append(caseNames, ac.announcer)
	}
	return cases, caseNames
}

// broadcast delivers message to every connected proxy's announcements
// channel. Sends are non-blocking: a proxy whose channel is not ready simply
// misses this notification rather than stalling the broadcaster.
func (mc *MeshCatalog) broadcast(message interface{}) {
	mc.connectedProxiesLock.Lock()
	defer mc.connectedProxiesLock.Unlock()

	for _, cp := range mc.connectedProxies {
		select {
		case cp.proxy.GetAnnouncementsChannel() <- message:
			// Delivered.
		default:
			// Receiver not ready — drop instead of blocking.
		}
	}
}

pkg/ingress/client.go 中

监控ingress变化
// NewIngressClient builds a client that watches extensions/v1beta1 Ingress
// resources, filtered to the monitored namespaces, starts its informer, and
// blocks until the cache has synced.
func NewIngressClient(kubeClient kubernetes.Interface, namespaceController namespace.Controller, stop chan struct{}, cfg configurator.Configurator) (Monitor, error) {
	ingressInformer := informers.NewSharedInformerFactory(kubeClient, k8s.DefaultKubeEventResyncInterval).Extensions().V1beta1().Ingresses().Informer()

	c := Client{
		informer:            ingressInformer,
		cache:               ingressInformer.GetStore(),
		cacheSynced:         make(chan interface{}),
		announcements:       make(chan interface{}),
		namespaceController: namespaceController,
	}

	// Announce events only for objects living in a monitored namespace.
	observeFilter := func(obj interface{}) bool {
		ns := reflect.ValueOf(obj).Elem().FieldByName("ObjectMeta").FieldByName("Namespace").String()
		return namespaceController.IsMonitoredNamespace(ns)
	}
	ingressInformer.AddEventHandler(k8s.GetKubernetesEventHandlers("Ingress", "Kubernetes", c.announcements, observeFilter))

	if err := c.run(stop); err != nil {
		log.Error().Err(err).Msg("Could not start Kubernetes Ingress client")
		return nil, err
	}

	return c, nil
}

// run starts the ingress informer and blocks until its cache has synced,
// closing cacheSynced on success to signal readiness to waiters.
func (c *Client) run(stop <-chan struct{}) error {
	if c.informer == nil {
		return errInitInformers
	}

	go c.informer.Run(stop)

	// Block until the initial Ingress list has been cached.
	if ok := cache.WaitForCacheSync(stop, c.informer.HasSynced); !ok {
		return errSyncingCaches
	}

	close(c.cacheSynced)
	return nil
}

pkg/endpoint/providers/kube/client.go 中

监控endpoint/deployment变化
// NewProvider builds the Kubernetes endpoints provider: it watches Endpoints
// and Deployments in monitored namespaces, starts both informers, and blocks
// until their caches have synced.
func NewProvider(kubeClient kubernetes.Interface, namespaceController namespace.Controller, stop chan struct{}, providerIdent string, cfg configurator.Configurator) (*Client, error) {
	factory := informers.NewSharedInformerFactory(kubeClient, k8s.DefaultKubeEventResyncInterval)

	informerCollection := InformerCollection{
		Endpoints:   factory.Core().V1().Endpoints().Informer(),
		Deployments: factory.Apps().V1().Deployments().Informer(),
	}

	cacheCollection := CacheCollection{
		Endpoints:   informerCollection.Endpoints.GetStore(),
		Deployments: informerCollection.Deployments.GetStore(),
	}

	c := Client{
		providerIdent:       providerIdent,
		kubeClient:          kubeClient,
		informers:           &informerCollection,
		caches:              &cacheCollection,
		cacheSynced:         make(chan interface{}),
		announcements:       make(chan interface{}),
		namespaceController: namespaceController,
	}

	// Announce events only for objects living in a monitored namespace.
	observeFilter := func(obj interface{}) bool {
		ns := reflect.ValueOf(obj).Elem().FieldByName("ObjectMeta").FieldByName("Namespace").String()
		return namespaceController.IsMonitoredNamespace(ns)
	}
	informerCollection.Endpoints.AddEventHandler(k8s.GetKubernetesEventHandlers("Endpoints", "Kubernetes", c.announcements, observeFilter))
	informerCollection.Deployments.AddEventHandler(k8s.GetKubernetesEventHandlers("Deployments", "Kubernetes", c.announcements, observeFilter))

	if err := c.run(stop); err != nil {
		return nil, errors.Errorf("Failed to start Kubernetes EndpointProvider client: %+v", err)
	}

	return &c, nil
}

// run starts the Endpoints and Deployments informers and blocks until their
// caches have synced; on success it closes cacheSynced to signal readiness.
// Returns errInitInformers when the informer collection was never
// initialized, or errSyncingCaches when the sync fails/stops.
//
// Fix vs. quoted original: the `names` slice was accumulated but never read
// (dead local, likely a leftover of an elided log statement) — removed.
func (c *Client) run(stop <-chan struct{}) error {
	if c.informers == nil {
		return errInitInformers
	}

	sharedInformers := map[string]cache.SharedInformer{
		"Endpoints":   c.informers.Endpoints,
		"Deployments": c.informers.Deployments,
	}

	// Start every non-nil informer and collect its HasSynced probe.
	var hasSynced []cache.InformerSynced
	for _, informer := range sharedInformers {
		if informer == nil {
			continue
		}
		go informer.Run(stop)
		hasSynced = append(hasSynced, informer.HasSynced)
	}

	if !cache.WaitForCacheSync(stop, hasSynced...) {
		return errSyncingCaches
	}

	// Caches are warm; unblock anyone waiting on cacheSynced.
	close(c.cacheSynced)

	return nil
}

pkg/namespace/client.go 中

监控namespace变化
// NewNamespaceController builds a controller that watches only the namespaces
// carrying this mesh's monitor label (MonitorLabel=meshName), starts the
// informer, and blocks until its cache has synced. Exits the process (Fatal)
// if the informer cannot be started.
func NewNamespaceController(kubeClient kubernetes.Interface, meshName string, stop chan struct{}) Controller {
	// Restrict the informer to namespaces labeled as part of this mesh.
	monitorNamespaceLabel := map[string]string{MonitorLabel: meshName}
	labelSelector := fields.SelectorFromSet(monitorNamespaceLabel).String()
	option := informers.WithTweakListOptions(func(opt *metav1.ListOptions) {
		opt.LabelSelector = labelSelector
	})
	informerFactory := informers.NewSharedInformerFactoryWithOptions(kubeClient, resyncPeriod, option)
	informer := informerFactory.Core().V1().Namespaces().Informer()

	client := Client{
		informer:      informer,
		cache:         informer.GetStore(),
		cacheSynced:   make(chan interface{}),
		announcements: make(chan interface{}),
	}

	if err := client.run(stop); err != nil {
		log.Fatal().Err(err).Msg("Could not start Kubernetes Namespaces client")
	}

	// NOTE(review): unlike the ingress/endpoints clients, the event handler is
	// registered AFTER run() has started the informer and completed the cache
	// sync, so events during the initial sync are presumably not announced —
	// confirm this ordering is intentional against the upstream source.
	informer.AddEventHandler(k8s.GetKubernetesEventHandlers("Namespace", "NamespaceClient", client.announcements, nil))

	return client
}

// run starts the namespace informer and blocks until its cache has synced,
// closing cacheSynced on success to signal readiness to waiters.
func (c *Client) run(stop <-chan struct{}) error {
	if c.informer == nil {
		return errInitInformers
	}

	go c.informer.Run(stop)

	// Wait for the initial namespace list to be cached.
	if synced := cache.WaitForCacheSync(stop, c.informer.HasSynced); !synced {
		return errSyncingCaches
	}

	close(c.cacheSynced)
	return nil
}

拿 eds 举例 pkg/envoy/eds/response.go 中

获取endpoint
// NewResponse builds the EDS (endpoint discovery) response for a proxy:
// resolve the proxy's own service from its certificate CN, list the traffic
// policies where that service is the source, gather the endpoints of every
// destination service, and wrap each set as a ClusterLoadAssignment.
// NOTE(review): excerpt — "..." marks lines elided by the article.
func NewResponse(ctx context.Context, catalog catalog.MeshCataloger, proxy *envoy.Proxy, request *xds_discovery.DiscoveryRequest, cfg configurator.Configurator) (*xds_discovery.DiscoveryResponse, error) {
	// The proxy's identity (its MeshService) comes from its certificate CN.
	svc, err := catalog.GetServiceFromEnvoyCertificate(proxy.GetCommonName())
    ...
	proxyServiceName := *svc

	allTrafficPolicies, err := catalog.ListTrafficPolicies(proxyServiceName)
	...

	// Collect endpoints only for destinations this proxy may call, i.e.
	// policies where it is the source.
	allServicesEndpoints := make(map[service.MeshService][]endpoint.Endpoint)
	for _, trafficPolicy := range allTrafficPolicies {
		isSourceService := trafficPolicy.Source.Equals(proxyServiceName)
		if isSourceService {
			destService := trafficPolicy.Destination
			serviceEndpoints, err := catalog.ListEndpointsForService(destService)
			if err != nil {
				return nil, err
			}
			allServicesEndpoints[destService] = serviceEndpoints
		}
	}
    ...
	// One ClusterLoadAssignment proto per destination service.
	var protos []*any.Any
	for serviceName, serviceEndpoints := range allServicesEndpoints {
		loadAssignment := cla.NewClusterLoadAssignment(serviceName, serviceEndpoints)
		
		proto, err := ptypes.MarshalAny(loadAssignment)
		...
		protos = append(protos, proto)
	}

	resp := &xds_discovery.DiscoveryResponse{
		Resources: protos,
		TypeUrl:   string(envoy.TypeEDS),
	}
	return resp, nil
}