diff --git a/charts/yurt-manager/templates/yurt-manager-auto-generated.yaml b/charts/yurt-manager/templates/yurt-manager-auto-generated.yaml
index b26ee9e27f7..897d2b27eff 100644
--- a/charts/yurt-manager/templates/yurt-manager-auto-generated.yaml
+++ b/charts/yurt-manager/templates/yurt-manager-auto-generated.yaml
@@ -56,6 +56,12 @@ metadata:
 ---
 apiVersion: v1
 kind: ServiceAccount
+metadata:
+  name: yurt-manager-hubleaderconfig-controller
+  namespace: {{ .Release.Namespace }}
+---
+apiVersion: v1
+kind: ServiceAccount
 metadata:
   name: yurt-manager-load-balancer-set-controller
   namespace: {{ .Release.Namespace }}
@@ -476,12 +482,35 @@ rules:
 - apiGroups:
   - apps.openyurt.io
   resources:
-  - nodepool
-  - nodepool/status
+  - nodepools
+  - nodepools/status
+  verbs:
+  - get
+  - patch
+  - update
+---
+apiVersion: rbac.authorization.k8s.io/v1
+kind: ClusterRole
+metadata:
+  name: yurt-manager-hubleaderconfig-controller
+rules:
+- apiGroups:
+  - ""
+  resources:
+  - configmaps
   verbs:
+  - create
+  - delete
   - get
   - patch
   - update
+- apiGroups:
+  - apps.openyurt.io
+  resources:
+  - nodepools
+  - nodepools/status
+  verbs:
+  - get
 ---
 apiVersion: rbac.authorization.k8s.io/v1
 kind: ClusterRole
@@ -1042,6 +1071,19 @@ subjects:
 ---
 apiVersion: rbac.authorization.k8s.io/v1
 kind: ClusterRoleBinding
+metadata:
+  name: yurt-manager-hubleaderconfig-controller-binding
+roleRef:
+  apiGroup: rbac.authorization.k8s.io
+  kind: ClusterRole
+  name: yurt-manager-hubleaderconfig-controller
+subjects:
+- kind: ServiceAccount
+  name: yurt-manager-hubleaderconfig-controller
+  namespace: {{ .Release.Namespace }}
+---
+apiVersion: rbac.authorization.k8s.io/v1
+kind: ClusterRoleBinding
 metadata:
   name: yurt-manager-load-balancer-set-controller-binding
 roleRef:
diff --git a/cmd/yurt-manager/app/options/hubleaderconfigcontroller.go b/cmd/yurt-manager/app/options/hubleaderconfigcontroller.go
new file mode 100644
index 00000000000..809923c64ae
--- /dev/null
+++ b/cmd/yurt-manager/app/options/hubleaderconfigcontroller.go
@@ -0,0 +1,71 @@
+/*
+Copyright 2025 The OpenYurt Authors.
+
+Licensed under the Apache License, Version 2.0 (the License);
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an AS IS BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package options
+
+import (
+	"github.com/spf13/pflag"
+
+	"github.com/openyurtio/openyurt/pkg/yurtmanager/controller/hubleaderconfig/config"
+)
+
+type HubLeaderConfigControllerOptions struct {
+	*config.HubLeaderConfigControllerConfiguration
+}
+
+func NewHubLeaderConfigControllerOptions(hubleaderNamespace string) *HubLeaderConfigControllerOptions {
+	return &HubLeaderConfigControllerOptions{
+		&config.HubLeaderConfigControllerConfiguration{
+			ConcurrentHubLeaderConfigWorkers: 3,
+			HubLeaderNamespace:               hubleaderNamespace,
+		},
+	}
+}
+
+// AddFlags adds flags related to hubleaderconfig for yurt-manager to the specified FlagSet.
+func (h *HubLeaderConfigControllerOptions) AddFlags(fs *pflag.FlagSet) {
+	if h == nil {
+		return
+	}
+
+	fs.Int32Var(
+		&h.ConcurrentHubLeaderConfigWorkers,
+		"concurrent-hubleaderconfig-workers",
+		h.ConcurrentHubLeaderConfigWorkers,
+		"The number of nodepool objects that are allowed to reconcile concurrently.",
+	)
+}
+
+// ApplyTo fills up hubleaderconfig config with options.
+func (h *HubLeaderConfigControllerOptions) ApplyTo(cfg *config.HubLeaderConfigControllerConfiguration) error {
+	if h == nil {
+		return nil
+	}
+
+	cfg.ConcurrentHubLeaderConfigWorkers = h.ConcurrentHubLeaderConfigWorkers
+	cfg.HubLeaderNamespace = h.HubLeaderNamespace
+
+	return nil
+}
+
+// Validate checks validation of HubLeaderConfigControllerOptions.
+func (h *HubLeaderConfigControllerOptions) Validate() []error {
+	if h == nil {
+		return nil
+	}
+	errs := []error{}
+	return errs
+}
diff --git a/cmd/yurt-manager/app/options/options.go b/cmd/yurt-manager/app/options/options.go
index 207d528eb99..e6652882420 100644
--- a/cmd/yurt-manager/app/options/options.go
+++ b/cmd/yurt-manager/app/options/options.go
@@ -46,13 +46,14 @@ type YurtManagerOptions struct {
 	GatewayInternalSvcController *GatewayInternalSvcControllerOptions
 	GatewayPublicSvcController   *GatewayPublicSvcControllerOptions
 	HubLeaderController          *HubLeaderControllerOptions
+	HubLeaderConfigController    *HubLeaderConfigControllerOptions
 }
 
 // NewYurtManagerOptions creates a new YurtManagerOptions with a default config.
 func NewYurtManagerOptions() (*YurtManagerOptions, error) {
-
+	genericOptions := NewGenericOptions()
 	s := YurtManagerOptions{
-		Generic:                      NewGenericOptions(),
+		Generic:                      genericOptions,
 		DelegateLeaseController:      NewDelegateLeaseControllerOptions(),
 		PodBindingController:         NewPodBindingControllerOptions(),
 		DaemonPodUpdaterController:   NewDaemonPodUpdaterControllerOptions(),
@@ -73,6 +74,7 @@ func NewYurtManagerOptions() (*YurtManagerOptions, error) {
 		GatewayInternalSvcController: NewGatewayInternalSvcControllerOptions(),
 		GatewayPublicSvcController:   NewGatewayPublicSvcControllerOptions(),
 		HubLeaderController:          NewHubLeaderControllerOptions(),
+		HubLeaderConfigController:    NewHubLeaderConfigControllerOptions(genericOptions.WorkingNamespace),
 	}
 
 	return &s, nil
@@ -101,6 +103,7 @@ func (y *YurtManagerOptions) Flags(allControllers, disabledByDefaultControllers
 	y.GatewayInternalSvcController.AddFlags(fss.FlagSet("gatewayinternalsvc controller"))
 	y.GatewayPublicSvcController.AddFlags(fss.FlagSet("gatewaypublicsvc controller"))
 	y.HubLeaderController.AddFlags(fss.FlagSet("hubleader controller"))
+	y.HubLeaderConfigController.AddFlags(fss.FlagSet("hubleaderconfig controller"))
 
 	return fss
 }
@@ -128,6 +131,7 @@ func (y *YurtManagerOptions) Validate(allControllers []string, controllerAliases
 	errs = append(errs, y.GatewayInternalSvcController.Validate()...)
 	errs = append(errs, y.GatewayPublicSvcController.Validate()...)
 	errs = append(errs, y.HubLeaderController.Validate()...)
+	errs = append(errs, y.HubLeaderConfigController.Validate()...)
 
 	return utilerrors.NewAggregate(errs)
 }
 
@@ -196,6 +200,9 @@ func (y *YurtManagerOptions) ApplyTo(c *config.Config, controllerAliases map[str
 	if err := y.HubLeaderController.ApplyTo(&c.ComponentConfig.HubLeaderController); err != nil {
 		return err
 	}
+	if err := y.HubLeaderConfigController.ApplyTo(&c.ComponentConfig.HubLeaderConfigController); err != nil {
+		return err
+	}
 
 	return nil
 }
diff --git a/cmd/yurt-manager/names/controller_names.go b/cmd/yurt-manager/names/controller_names.go
index c0df6e62e7c..3a30c6e7ceb 100644
--- a/cmd/yurt-manager/names/controller_names.go
+++ b/cmd/yurt-manager/names/controller_names.go
@@ -37,6 +37,7 @@ const (
 	NodeBucketController      = "node-bucket-controller"
 	LoadBalancerSetController = "load-balancer-set-controller"
 	HubLeaderController       = "hubleader-controller"
+	HubLeaderConfigController = "hubleaderconfig-controller"
 )
 
 func YurtManagerControllerAliases() map[string]string {
@@ -61,5 +62,7 @@ func YurtManagerControllerAliases() map[string]string {
 		"nodelifecycle":   NodeLifeCycleController,
 		"nodebucket":      NodeBucketController,
 		"loadbalancerset": LoadBalancerSetController,
+		"hubleader":       HubLeaderController,
+		"hubleaderconfig": HubLeaderConfigController,
 	}
 }
diff --git a/pkg/projectinfo/projectinfo.go b/pkg/projectinfo/projectinfo.go
index ddb701562a7..c13f433bd2e 100644
--- a/pkg/projectinfo/projectinfo.go
+++ b/pkg/projectinfo/projectinfo.go
@@ -128,6 +128,16 @@ func GetNodePoolLabel() string {
 	return nodePoolLabelKey
 }
 
+// GetHubLeaderConfigMapName returns the name of the leader ConfigMap for the nodepool
+func GetHubLeaderConfigMapName(nodepoolName string) string {
+	return fmt.Sprintf("leader-hub-%s", nodepoolName)
+}
+
+// GetHubLeaderConfigMapLabel returns the label key of the leader ConfigMap for the nodepool
+func GetHubLeaderConfigMapLabel() string {
+	return fmt.Sprintf("%s/configmap-name", labelPrefix)
+}
+
 // Info contains version information.
 type Info struct {
 	GitVersion string `json:"gitVersion"`
diff --git a/pkg/yurtmanager/controller/apis/config/types.go b/pkg/yurtmanager/controller/apis/config/types.go
index 7e7a1b5667e..17ea7c7d7f2 100644
--- a/pkg/yurtmanager/controller/apis/config/types.go
+++ b/pkg/yurtmanager/controller/apis/config/types.go
@@ -24,6 +24,7 @@ import (
 	csrapproverconfig "github.com/openyurtio/openyurt/pkg/yurtmanager/controller/csrapprover/config"
 	daemonpodupdaterconfig "github.com/openyurtio/openyurt/pkg/yurtmanager/controller/daemonpodupdater/config"
 	hubleaderconfig "github.com/openyurtio/openyurt/pkg/yurtmanager/controller/hubleader/config"
+	hubleadercfgconfig "github.com/openyurtio/openyurt/pkg/yurtmanager/controller/hubleaderconfig/config"
 	loadbalancersetconfig "github.com/openyurtio/openyurt/pkg/yurtmanager/controller/loadbalancerset/loadbalancerset/config"
 	nodebucketconfig "github.com/openyurtio/openyurt/pkg/yurtmanager/controller/nodebucket/config"
 	nodepoolconfig "github.com/openyurtio/openyurt/pkg/yurtmanager/controller/nodepool/config"
@@ -106,6 +107,9 @@ type YurtManagerConfiguration struct {
 
 	// HubLeaderController holds configuration for HubLeaderController related features.
 	HubLeaderController hubleaderconfig.HubLeaderControllerConfiguration
+
+	// HubLeaderConfigController holds configuration for HubLeaderConfigController related features.
+ HubLeaderConfigController hubleadercfgconfig.HubLeaderConfigControllerConfiguration } type GenericConfiguration struct { diff --git a/pkg/yurtmanager/controller/base/controller.go b/pkg/yurtmanager/controller/base/controller.go index e29379c32c2..f5c1a72398c 100644 --- a/pkg/yurtmanager/controller/base/controller.go +++ b/pkg/yurtmanager/controller/base/controller.go @@ -32,6 +32,8 @@ import ( "github.com/openyurtio/openyurt/cmd/yurt-manager/names" "github.com/openyurtio/openyurt/pkg/yurtmanager/controller/csrapprover" "github.com/openyurtio/openyurt/pkg/yurtmanager/controller/daemonpodupdater" + "github.com/openyurtio/openyurt/pkg/yurtmanager/controller/hubleader" + "github.com/openyurtio/openyurt/pkg/yurtmanager/controller/hubleaderconfig" "github.com/openyurtio/openyurt/pkg/yurtmanager/controller/loadbalancerset/loadbalancerset" "github.com/openyurtio/openyurt/pkg/yurtmanager/controller/nodebucket" "github.com/openyurtio/openyurt/pkg/yurtmanager/controller/nodelifecycle" @@ -97,6 +99,8 @@ func NewControllerInitializers() map[string]InitFunc { register(names.NodeLifeCycleController, nodelifecycle.Add) register(names.NodeBucketController, nodebucket.Add) register(names.LoadBalancerSetController, loadbalancerset.Add) + register(names.HubLeaderController, hubleader.Add) + register(names.HubLeaderConfigController, hubleaderconfig.Add) return controllers } @@ -134,7 +138,11 @@ func NewControllerInitializers() map[string]InitFunc { func SetupWithManager(ctx context.Context, c *config.CompletedConfig, m manager.Manager) error { for controllerName, fn := range NewControllerInitializers() { - if !app.IsControllerEnabled(controllerName, ControllersDisabledByDefault, c.ComponentConfig.Generic.Controllers) { + if !app.IsControllerEnabled( + controllerName, + ControllersDisabledByDefault, + c.ComponentConfig.Generic.Controllers, + ) { klog.Warningf("Controller %v is disabled", controllerName) continue } @@ -150,8 +158,16 @@ func SetupWithManager(ctx context.Context, c *config.CompletedConfig, m manager. 
} } - if app.IsControllerEnabled(names.NodeLifeCycleController, ControllersDisabledByDefault, c.ComponentConfig.Generic.Controllers) || - app.IsControllerEnabled(names.PodBindingController, ControllersDisabledByDefault, c.ComponentConfig.Generic.Controllers) { + if app.IsControllerEnabled( + names.NodeLifeCycleController, + ControllersDisabledByDefault, + c.ComponentConfig.Generic.Controllers, + ) || + app.IsControllerEnabled( + names.PodBindingController, + ControllersDisabledByDefault, + c.ComponentConfig.Generic.Controllers, + ) { // Register spec.NodeName field indexers if err := m.GetFieldIndexer().IndexField(context.TODO(), &v1.Pod{}, "spec.nodeName", func(rawObj client.Object) []string { pod, ok := rawObj.(*v1.Pod) diff --git a/pkg/yurtmanager/controller/hubleader/hubleader_controller.go b/pkg/yurtmanager/controller/hubleader/hubleader_controller.go index 3a70e860b6e..4f116855441 100644 --- a/pkg/yurtmanager/controller/hubleader/hubleader_controller.go +++ b/pkg/yurtmanager/controller/hubleader/hubleader_controller.go @@ -41,6 +41,7 @@ import ( "github.com/openyurtio/openyurt/pkg/projectinfo" "github.com/openyurtio/openyurt/pkg/yurtmanager/controller/hubleader/config" nodeutil "github.com/openyurtio/openyurt/pkg/yurtmanager/controller/util/node" + nodepoolutil "github.com/openyurtio/openyurt/pkg/yurtmanager/controller/util/nodepool" ) var ( @@ -131,8 +132,8 @@ type ReconcileHubLeader struct { Configuration config.HubLeaderControllerConfiguration } -// +kubebuilder:rbac:groups=apps.openyurt.io,resources=nodepool,verbs=get;update;patch -// +kubebuilder:rbac:groups=apps.openyurt.io,resources=nodepool/status,verbs=get;update;patch +// +kubebuilder:rbac:groups=apps.openyurt.io,resources=nodepools,verbs=get;update;patch +// +kubebuilder:rbac:groups=apps.openyurt.io,resources=nodepools/status,verbs=get;update;patch // Reconcile reads that state of the cluster for a HubLeader object and makes changes based on the state read // and what is in the HubLeader.Spec @@ -235,7 +236,7 @@ func (r *ReconcileHubLeader) reconcileHubLeader(ctx context.Context, nodepool *a updatedNodePool.Status.LeaderEndpoints = updatedLeaders - if !hasLeadersChanged(nodepool.Status.LeaderEndpoints, updatedNodePool.Status.LeaderEndpoints) { + if !nodepoolutil.HasSliceContentChanged(nodepool.Status.LeaderEndpoints, updatedNodePool.Status.LeaderEndpoints) { return nil } @@ -248,27 +249,6 @@ func (r *ReconcileHubLeader) reconcileHubLeader(ctx context.Context, nodepool *a return nil } -// hasLeadersChanged checks if the leader endpoints have changed -func hasLeadersChanged(old, new []appsv1beta2.Leader) bool { - if len(old) != len(new) { - return true - } - - oldSet := make(map[appsv1beta2.Leader]struct{}, len(old)) - - for i := range old { - oldSet[old[i]] = struct{}{} - } - - for i := range new { - if _, ok := oldSet[new[i]]; !ok { - return true - } - } - - return false -} - // electNLeaders elects N leaders from the candidates based on the strategy func electNLeaders( strategy string, diff --git a/pkg/yurtmanager/controller/hubleaderconfig/config/types.go b/pkg/yurtmanager/controller/hubleaderconfig/config/types.go new file mode 100644 index 00000000000..8fa50eb05ab --- /dev/null +++ b/pkg/yurtmanager/controller/hubleaderconfig/config/types.go @@ -0,0 +1,24 @@ +/* +Copyright 2025 The OpenYurt Authors. + +Licensed under the Apache License, Version 2.0 (the License); +you may not use this file except in compliance with the License. 
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an AS IS BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package config
+
+// HubLeaderConfigControllerConfiguration contains elements describing HubLeaderConfigController.
+type HubLeaderConfigControllerConfiguration struct {
+	ConcurrentHubLeaderConfigWorkers int32
+
+	HubLeaderNamespace string
+}
diff --git a/pkg/yurtmanager/controller/hubleaderconfig/hubleaderconfig_controller.go b/pkg/yurtmanager/controller/hubleaderconfig/hubleaderconfig_controller.go
new file mode 100644
index 00000000000..c8cbd1536c5
--- /dev/null
+++ b/pkg/yurtmanager/controller/hubleaderconfig/hubleaderconfig_controller.go
@@ -0,0 +1,239 @@
+/*
+Copyright 2025 The OpenYurt Authors.
+
+Licensed under the Apache License, Version 2.0 (the License);
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an AS IS BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package hubleaderconfig
+
+import (
+	"context"
+	"fmt"
+	"maps"
+	"strconv"
+	"strings"
+
+	v1 "k8s.io/api/core/v1"
+	"k8s.io/apimachinery/pkg/api/errors"
+	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+	"k8s.io/apimachinery/pkg/types"
+	"k8s.io/client-go/tools/record"
+	"k8s.io/klog/v2"
+	"k8s.io/kubernetes/pkg/util/slice"
+	"sigs.k8s.io/controller-runtime/pkg/client"
+	"sigs.k8s.io/controller-runtime/pkg/controller"
+	"sigs.k8s.io/controller-runtime/pkg/event"
+	"sigs.k8s.io/controller-runtime/pkg/handler"
+	"sigs.k8s.io/controller-runtime/pkg/manager"
+	"sigs.k8s.io/controller-runtime/pkg/predicate"
+	"sigs.k8s.io/controller-runtime/pkg/reconcile"
+	"sigs.k8s.io/controller-runtime/pkg/source"
+
+	yurtClient "github.com/openyurtio/openyurt/cmd/yurt-manager/app/client"
+	appconfig "github.com/openyurtio/openyurt/cmd/yurt-manager/app/config"
+	"github.com/openyurtio/openyurt/cmd/yurt-manager/names"
+	appsv1beta2 "github.com/openyurtio/openyurt/pkg/apis/apps/v1beta2"
+	"github.com/openyurtio/openyurt/pkg/projectinfo"
+	"github.com/openyurtio/openyurt/pkg/yurtmanager/controller/hubleaderconfig/config"
+	nodepoolutil "github.com/openyurtio/openyurt/pkg/yurtmanager/controller/util/nodepool"
+)
+
+var (
+	controllerKind = appsv1beta2.SchemeGroupVersion.WithKind("NodePool")
+)
+
+// Add creates a new HubLeaderConfig controller and adds it to the Manager with default RBAC. The Manager will set fields on the controller
+// and start it when the Manager is started.
+func Add(ctx context.Context, cfg *appconfig.CompletedConfig, mgr manager.Manager) error {
+	klog.Infof("hubleaderconfig-controller add controller %s", controllerKind.String())
+
+	reconciler := &ReconcileHubLeaderConfig{
+		Client:        yurtClient.GetClientByControllerNameOrDie(mgr, names.HubLeaderConfigController),
+		recorder:      mgr.GetEventRecorderFor(names.HubLeaderConfigController),
+		Configuration: cfg.ComponentConfig.HubLeaderConfigController,
+	}
+
+	// Create a new controller
+	c, err := controller.New(
+		names.HubLeaderConfigController,
+		mgr,
+		controller.Options{
+			Reconciler:              reconciler,
+			MaxConcurrentReconciles: int(cfg.ComponentConfig.HubLeaderConfigController.ConcurrentHubLeaderConfigWorkers),
+		},
+	)
+	if err != nil {
+		return err
+	}
+
+	poolPredicate := predicate.Funcs{
+		CreateFunc: func(e event.CreateEvent) bool {
+			_, ok := e.Object.(*appsv1beta2.NodePool)
+			return ok
+		},
+		DeleteFunc: func(e event.DeleteEvent) bool {
+			return true
+		},
+		UpdateFunc: func(e event.UpdateEvent) bool {
+			oldPool, ok := e.ObjectOld.(*appsv1beta2.NodePool)
+			if !ok {
+				return false
+			}
+			newPool, ok := e.ObjectNew.(*appsv1beta2.NodePool)
+			if !ok {
+				return false
+			}
+
+			// Only update if the leader has changed or the pool scope metadata has changed
+			return nodepoolutil.HasSliceContentChanged(
+				oldPool.Status.LeaderEndpoints,
+				newPool.Status.LeaderEndpoints,
+			) || nodepoolutil.HasSliceContentChanged(
+				oldPool.Spec.PoolScopeMetadata,
+				newPool.Spec.PoolScopeMetadata,
+			)
+		},
+	}
+
+	// Watch for changes to NodePool
+	err = c.Watch(
+		source.Kind[client.Object](
+			mgr.GetCache(),
+			&appsv1beta2.NodePool{},
+			&handler.EnqueueRequestForObject{},
+			poolPredicate,
+		),
+	)
+	if err != nil {
+		return err
+	}
+
+	return nil
+}
+
+var _ reconcile.Reconciler = &ReconcileHubLeaderConfig{}
+
+// ReconcileHubLeaderConfig reconciles the leader ConfigMap for a NodePool object
+type ReconcileHubLeaderConfig struct {
+	client.Client
+	recorder      record.EventRecorder
+	Configuration config.HubLeaderConfigControllerConfiguration
+}
+
+// +kubebuilder:rbac:groups=apps.openyurt.io,resources=nodepools,verbs=get
+// +kubebuilder:rbac:groups=apps.openyurt.io,resources=nodepools/status,verbs=get
+// +kubebuilder:rbac:groups=core,resources=configmaps,verbs=get;update;patch;create;delete
+
+// Reconcile reads the leader status of a NodePool and keeps the corresponding leader ConfigMap up to date
+func (r *ReconcileHubLeaderConfig) Reconcile(
+	ctx context.Context,
+	request reconcile.Request,
+) (reconcile.Result, error) {
+	klog.Infof("Reconcile NodePool leader %s/%s", request.Namespace, request.Name)
+
+	// Fetch the NodePool instance
+	nodepool := &appsv1beta2.NodePool{}
+	if err := r.Get(ctx, request.NamespacedName, nodepool); err != nil {
+		return reconcile.Result{}, client.IgnoreNotFound(err)
+	}
+
+	if !nodepool.ObjectMeta.DeletionTimestamp.IsZero() {
+		// If the NodePool is being deleted, delete the leader configmap
+		configMapName := projectinfo.GetHubLeaderConfigMapName(nodepool.Name)
+		err := r.Client.Delete(ctx, &v1.ConfigMap{
+			ObjectMeta: metav1.ObjectMeta{
+				Name:      configMapName,
+				Namespace: r.Configuration.HubLeaderNamespace,
+			},
+		})
+		if err != nil && !errors.IsNotFound(err) {
+			return reconcile.Result{}, err
+		}
+		return reconcile.Result{}, nil
+	}
+
+	// Reconcile the hub leader config
+	if err := r.reconcileHubLeaderConfig(ctx, nodepool); err != nil {
+		r.recorder.Eventf(nodepool, v1.EventTypeWarning, "ReconcileError", "Failed to reconcile NodePool: %v", err)
+		return reconcile.Result{}, err
+	}
+
+	return reconcile.Result{}, nil
+}
+
+func (r *ReconcileHubLeaderConfig) reconcileHubLeaderConfig(
+	ctx context.Context,
+	nodepool *appsv1beta2.NodePool,
+) error {
+	configMapName := projectinfo.GetHubLeaderConfigMapName(nodepool.Name)
+
+	// Get the leader ConfigMap for the nodepool
+	leaderConfigMap := &v1.ConfigMap{}
+	err := r.Get(ctx, types.NamespacedName{
+		Name:      configMapName,
+		Namespace: r.Configuration.HubLeaderNamespace,
+	}, leaderConfigMap)
+	if err != nil && !errors.IsNotFound(err) {
+		// Error retrieving the ConfigMap
+		return err
+	}
+
+	// Add leader endpoints
+	leaders := make([]string, 0, len(nodepool.Status.LeaderEndpoints))
+	for _, leader := range nodepool.Status.LeaderEndpoints {
+		leaders = append(leaders, leader.NodeName+"/"+leader.Address)
+	}
+
+	// Add pool scope metadata
+	poolScopedMetadata := make([]string, 0, len(nodepool.Spec.PoolScopeMetadata))
+	for _, metadata := range nodepool.Spec.PoolScopeMetadata {
+		poolScopedMetadata = append(poolScopedMetadata, getGVKString(metadata))
+	}
+
+	// Prepare data
+	data := map[string]string{
+		"leaders":              strings.Join(slice.SortStrings(leaders), ","),
+		"pool-scoped-metadata": strings.Join(slice.SortStrings(poolScopedMetadata), ","),
+		"interconnectivity":    strconv.FormatBool(nodepool.Spec.InterConnectivity),
+	}
+
+	// If the ConfigMap does not exist, create it
+	if errors.IsNotFound(err) {
+		leaderConfigMap = &v1.ConfigMap{
+			ObjectMeta: metav1.ObjectMeta{
+				Name:      configMapName,
+				Namespace: r.Configuration.HubLeaderNamespace,
+				Labels: map[string]string{
+					projectinfo.GetHubLeaderConfigMapLabel(): configMapName,
+				},
+			},
+			Data: data,
+		}
+
+		// Create the ConfigMap resource
+		return r.Create(ctx, leaderConfigMap)
+	}
+
+	if !maps.Equal(leaderConfigMap.Data, data) {
+		// Update the ConfigMap resource
+		leaderConfigMap.Data = data
+		return r.Update(ctx, leaderConfigMap)
+	}
+
+	return nil
+}
+
+// getGVKString returns a string representation of the GroupVersionKind
+func getGVKString(gvk metav1.GroupVersionKind) string {
+	return fmt.Sprintf("%s/%s/%s", gvk.Group, gvk.Version, gvk.Kind)
+}
diff --git a/pkg/yurtmanager/controller/hubleaderconfig/hubleaderconfig_controller_test.go b/pkg/yurtmanager/controller/hubleaderconfig/hubleaderconfig_controller_test.go
new file mode 100644
index 00000000000..7dc5ecfa15c
--- /dev/null
+++ b/pkg/yurtmanager/controller/hubleaderconfig/hubleaderconfig_controller_test.go
@@ -0,0 +1,418 @@
+/*
+Copyright 2025 The OpenYurt Authors.
+
+Licensed under the Apache License, Version 2.0 (the License);
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an AS IS BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/ + +package hubleaderconfig + +import ( + "context" + "testing" + + "github.com/stretchr/testify/require" + v1 "k8s.io/api/core/v1" + "k8s.io/apimachinery/pkg/api/errors" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/runtime" + "k8s.io/apimachinery/pkg/types" + clientgoscheme "k8s.io/client-go/kubernetes/scheme" + "k8s.io/client-go/tools/record" + fakeclient "sigs.k8s.io/controller-runtime/pkg/client/fake" + "sigs.k8s.io/controller-runtime/pkg/reconcile" + + "github.com/openyurtio/openyurt/pkg/apis" + appsv1beta2 "github.com/openyurtio/openyurt/pkg/apis/apps/v1beta2" + "github.com/openyurtio/openyurt/pkg/projectinfo" + "github.com/openyurtio/openyurt/pkg/yurtmanager/controller/hubleaderconfig/config" +) + +func TestReconcile(t *testing.T) { + scheme := runtime.NewScheme() + + err := clientgoscheme.AddToScheme(scheme) + require.NoError(t, err) + err = apis.AddToScheme(scheme) + require.NoError(t, err) + + testCases := map[string]struct { + pool *appsv1beta2.NodePool + existingConfigMap *v1.ConfigMap + expectedConfigMap *v1.ConfigMap + expectErr bool + }{ + "one endpoint": { + pool: &appsv1beta2.NodePool{ + ObjectMeta: metav1.ObjectMeta{ + Name: "hangzhou", + }, + Spec: appsv1beta2.NodePoolSpec{ + Type: appsv1beta2.Edge, + Labels: map[string]string{ + "region": "hangzhou", + }, + LeaderReplicas: 1, + LeaderElectionStrategy: string(appsv1beta2.ElectionStrategyRandom), + InterConnectivity: true, + PoolScopeMetadata: []metav1.GroupVersionKind{ + { + Group: "core", + Version: "v1", + Kind: "Service", + }, + { + Group: "discovery.k8s.io", + Version: "v1", + Kind: "EndpointSlice", + }, + }, + }, + Status: appsv1beta2.NodePoolStatus{ + LeaderEndpoints: []appsv1beta2.Leader{ + { + NodeName: "node1", + Address: "10.0.0.1", + }, + }, + }, + }, + existingConfigMap: nil, + expectedConfigMap: &v1.ConfigMap{ + ObjectMeta: metav1.ObjectMeta{ + Name: "leader-hub-hangzhou", + Namespace: metav1.NamespaceSystem, + Labels: map[string]string{ + projectinfo.GetHubLeaderConfigMapLabel(): "leader-hub-hangzhou", + }, + }, + Data: map[string]string{ + "leaders": "node1/10.0.0.1", + "pool-scoped-metadata": "core/v1/Service,discovery.k8s.io/v1/EndpointSlice", + "interconnectivity": "true", + }, + }, + expectErr: false, + }, + "multiple endpoints": { + pool: &appsv1beta2.NodePool{ + ObjectMeta: metav1.ObjectMeta{ + Name: "shanghai", + }, + Spec: appsv1beta2.NodePoolSpec{ + Type: appsv1beta2.Edge, + Labels: map[string]string{ + "region": "shanghai", + }, + LeaderReplicas: 1, + LeaderElectionStrategy: string(appsv1beta2.ElectionStrategyRandom), + InterConnectivity: true, + PoolScopeMetadata: []metav1.GroupVersionKind{ + { + Group: "core", + Version: "v1", + Kind: "Service", + }, + { + Group: "discovery.k8s.io", + Version: "v1", + Kind: "EndpointSlice", + }, + }, + }, + Status: appsv1beta2.NodePoolStatus{ + LeaderEndpoints: []appsv1beta2.Leader{ + { + NodeName: "node1", + Address: "10.0.0.1", + }, + { + NodeName: "node2", + Address: "10.0.0.2", + }, + }, + }, + }, + existingConfigMap: nil, + expectedConfigMap: &v1.ConfigMap{ + ObjectMeta: metav1.ObjectMeta{ + Name: "leader-hub-shanghai", + Namespace: metav1.NamespaceSystem, + Labels: map[string]string{ + projectinfo.GetHubLeaderConfigMapLabel(): "leader-hub-shanghai", + }, + }, + Data: map[string]string{ + "leaders": "node1/10.0.0.1,node2/10.0.0.2", + "pool-scoped-metadata": "core/v1/Service,discovery.k8s.io/v1/EndpointSlice", + "interconnectivity": "true", + }, + }, + expectErr: false, + }, + "config map need update": { + pool: 
&appsv1beta2.NodePool{ + ObjectMeta: metav1.ObjectMeta{ + Name: "shanghai", + }, + Spec: appsv1beta2.NodePoolSpec{ + Type: appsv1beta2.Edge, + Labels: map[string]string{ + "region": "shanghai", + }, + LeaderReplicas: 1, + LeaderElectionStrategy: string(appsv1beta2.ElectionStrategyRandom), + InterConnectivity: true, + PoolScopeMetadata: []metav1.GroupVersionKind{ + { + Group: "core", + Version: "v1", + Kind: "Service", + }, + { + Group: "discovery.k8s.io", + Version: "v1", + Kind: "EndpointSlice", + }, + }, + }, + Status: appsv1beta2.NodePoolStatus{ + LeaderEndpoints: []appsv1beta2.Leader{ + { + NodeName: "node1", + Address: "10.0.0.1", + }, + { + NodeName: "node2", + Address: "10.0.0.2", + }, + }, + }, + }, + existingConfigMap: &v1.ConfigMap{ + ObjectMeta: metav1.ObjectMeta{ + Name: "leader-hub-shanghai", + Namespace: metav1.NamespaceSystem, + Labels: map[string]string{ + projectinfo.GetHubLeaderConfigMapLabel(): "leader-hub-shanghai", + }, + }, + Data: map[string]string{ + "leaders": "node1/10.0.0.1", + "pool-scoped-metadata": "core/v1/Service", + "interconnectivity": "true", + }, + }, + expectedConfigMap: &v1.ConfigMap{ + ObjectMeta: metav1.ObjectMeta{ + Name: "leader-hub-shanghai", + Namespace: metav1.NamespaceSystem, + Labels: map[string]string{ + projectinfo.GetHubLeaderConfigMapLabel(): "leader-hub-shanghai", + }, + }, + Data: map[string]string{ + "leaders": "node1/10.0.0.1,node2/10.0.0.2", + "pool-scoped-metadata": "core/v1/Service,discovery.k8s.io/v1/EndpointSlice", + "interconnectivity": "true", + }, + }, + expectErr: false, + }, + "no endpoints": { + pool: &appsv1beta2.NodePool{ + ObjectMeta: metav1.ObjectMeta{ + Name: "beijing", + }, + Spec: appsv1beta2.NodePoolSpec{ + Type: appsv1beta2.Edge, + Labels: map[string]string{ + "region": "beijing", + }, + LeaderReplicas: 1, + LeaderElectionStrategy: string(appsv1beta2.ElectionStrategyRandom), + InterConnectivity: true, + PoolScopeMetadata: []metav1.GroupVersionKind{ + { + Group: "core", + Version: "v1", + Kind: "Service", + }, + { + Group: "discovery.k8s.io", + Version: "v1", + Kind: "EndpointSlice", + }, + }, + }, + Status: appsv1beta2.NodePoolStatus{ + LeaderEndpoints: []appsv1beta2.Leader{}, + }, + }, + existingConfigMap: nil, + expectedConfigMap: &v1.ConfigMap{ + ObjectMeta: metav1.ObjectMeta{ + Name: "leader-hub-beijing", + Namespace: metav1.NamespaceSystem, + Labels: map[string]string{ + projectinfo.GetHubLeaderConfigMapLabel(): "leader-hub-beijing", + }, + }, + Data: map[string]string{ + "leaders": "", + "pool-scoped-metadata": "core/v1/Service,discovery.k8s.io/v1/EndpointSlice", + "interconnectivity": "true", + }, + }, + expectErr: false, + }, + "no pool scope metadata": { + pool: &appsv1beta2.NodePool{ + ObjectMeta: metav1.ObjectMeta{ + Name: "beijing", + }, + Spec: appsv1beta2.NodePoolSpec{ + Type: appsv1beta2.Edge, + Labels: map[string]string{ + "region": "beijing", + }, + LeaderReplicas: 1, + LeaderElectionStrategy: string(appsv1beta2.ElectionStrategyRandom), + InterConnectivity: true, + }, + Status: appsv1beta2.NodePoolStatus{ + LeaderEndpoints: []appsv1beta2.Leader{ + { + NodeName: "node1", + Address: "10.0.0.1", + }, + { + NodeName: "node2", + Address: "10.0.0.2", + }, + }, + }, + }, + existingConfigMap: nil, + expectedConfigMap: &v1.ConfigMap{ + ObjectMeta: metav1.ObjectMeta{ + Name: "leader-hub-beijing", + Namespace: metav1.NamespaceSystem, + Labels: map[string]string{ + projectinfo.GetHubLeaderConfigMapLabel(): "leader-hub-beijing", + }, + }, + Data: map[string]string{ + "leaders": "node1/10.0.0.1,node2/10.0.0.2", + 
"pool-scoped-metadata": "", + "interconnectivity": "true", + }, + }, + expectErr: false, + }, + "no interconnectivity": { + pool: &appsv1beta2.NodePool{ + ObjectMeta: metav1.ObjectMeta{ + Name: "beijing", + }, + Spec: appsv1beta2.NodePoolSpec{ + Type: appsv1beta2.Edge, + Labels: map[string]string{ + "region": "beijing", + }, + LeaderReplicas: 1, + LeaderElectionStrategy: string(appsv1beta2.ElectionStrategyRandom), + InterConnectivity: false, + }, + Status: appsv1beta2.NodePoolStatus{ + LeaderEndpoints: []appsv1beta2.Leader{ + { + NodeName: "node1", + Address: "10.0.0.1", + }, + { + NodeName: "node2", + Address: "10.0.0.2", + }, + }, + }, + }, + existingConfigMap: nil, + expectedConfigMap: &v1.ConfigMap{ + ObjectMeta: metav1.ObjectMeta{ + Name: "leader-hub-beijing", + Namespace: metav1.NamespaceSystem, + Labels: map[string]string{ + projectinfo.GetHubLeaderConfigMapLabel(): "leader-hub-beijing", + }, + }, + Data: map[string]string{ + "leaders": "node1/10.0.0.1,node2/10.0.0.2", + "pool-scoped-metadata": "", + "interconnectivity": "false", + }, + }, + expectErr: false, + }, + } + + ctx := context.TODO() + for k, tc := range testCases { + t.Run(k, func(t *testing.T) { + c := fakeclient.NewClientBuilder(). + WithScheme(scheme). + WithObjects(tc.pool). + WithStatusSubresource(tc.pool) + + // Add existing ConfigMap if it exists + if tc.existingConfigMap != nil { + c.WithObjects(tc.existingConfigMap) + } + + r := &ReconcileHubLeaderConfig{ + Client: c.Build(), + Configuration: config.HubLeaderConfigControllerConfiguration{ + HubLeaderNamespace: metav1.NamespaceSystem, + }, + recorder: record.NewFakeRecorder(1000), + } + req := reconcile.Request{NamespacedName: types.NamespacedName{Name: tc.pool.Name}} + _, err := r.Reconcile(ctx, req) + if tc.expectErr { + require.Error(t, err) + } else { + require.NoError(t, err) + } + + var actualConfig v1.ConfigMap + if tc.expectedConfigMap == nil { + err = r.Get(ctx, types.NamespacedName{ + Name: "leader-hub-" + tc.pool.Name, + Namespace: metav1.NamespaceSystem, + }, &actualConfig) + require.True(t, errors.IsNotFound(err)) + return + } + + err = r.Get(ctx, types.NamespacedName{ + Name: tc.expectedConfigMap.Name, + Namespace: tc.expectedConfigMap.Namespace, + }, &actualConfig) + require.NoError(t, err) + + // Reset resource version - it's not important for the test + actualConfig.ResourceVersion = "" + + require.Equal(t, *tc.expectedConfigMap, actualConfig) + }) + } +} diff --git a/pkg/yurtmanager/controller/util/nodepool/nodepool.go b/pkg/yurtmanager/controller/util/nodepool/nodepool.go new file mode 100644 index 00000000000..d8c2172c8d6 --- /dev/null +++ b/pkg/yurtmanager/controller/util/nodepool/nodepool.go @@ -0,0 +1,37 @@ +/* +Copyright 2025 The OpenYurt Authors. + +Licensed under the Apache License, Version 2.0 (the License); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an AS IS BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package nodepool + +// HasSliceContentChanged checks if the content of the old and new slices has changed. 
+func HasSliceContentChanged[T comparable](old, new []T) bool {
+	if len(old) != len(new) {
+		return true
+	}
+
+	oldSet := make(map[T]struct{}, len(old))
+	for _, v := range old {
+		oldSet[v] = struct{}{}
+	}
+
+	for _, v := range new {
+		if _, ok := oldSet[v]; !ok {
+			return true
+		}
+	}
+
+	return false
+}
diff --git a/pkg/yurtmanager/controller/util/nodepool/nodepool_test.go b/pkg/yurtmanager/controller/util/nodepool/nodepool_test.go
new file mode 100644
index 00000000000..6076c517b59
--- /dev/null
+++ b/pkg/yurtmanager/controller/util/nodepool/nodepool_test.go
@@ -0,0 +1,186 @@
+/*
+Copyright 2025 The OpenYurt Authors.
+
+Licensed under the Apache License, Version 2.0 (the License);
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an AS IS BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package nodepool_test
+
+import (
+	"testing"
+
+	"github.com/stretchr/testify/assert"
+
+	"github.com/openyurtio/openyurt/pkg/apis/apps/v1beta2"
+	"github.com/openyurtio/openyurt/pkg/yurtmanager/controller/util/nodepool"
+)
+
+func TestHasSliceContentChanged(t *testing.T) {
+	tests := []struct {
+		name     string
+		old      []v1beta2.Leader
+		new      []v1beta2.Leader
+		expected bool
+	}{
+		{
+			name: "old and new are the same",
+			old: []v1beta2.Leader{
+				{
+					NodeName: "node1",
+					Address:  "10.0.0.1",
+				},
+				{
+					NodeName: "node2",
+					Address:  "10.0.0.2",
+				},
+			},
+			new: []v1beta2.Leader{
+				{
+					NodeName: "node1",
+					Address:  "10.0.0.1",
+				},
+				{
+					NodeName: "node2",
+					Address:  "10.0.0.2",
+				},
+			},
+			expected: false,
+		},
+		{
+			name: "new has extra element",
+			old: []v1beta2.Leader{
+				{
+					NodeName: "node1",
+					Address:  "10.0.0.1",
+				},
+			},
+			new: []v1beta2.Leader{
+				{
+					NodeName: "node1",
+					Address:  "10.0.0.1",
+				},
+				{
+					NodeName: "node2",
+					Address:  "10.0.0.2",
+				},
+			},
+			expected: true,
+		},
+		{
+			name: "old has extra element",
+			old: []v1beta2.Leader{
+				{
+					NodeName: "node1",
+					Address:  "10.0.0.1",
+				},
+				{
+					NodeName: "node2",
+					Address:  "10.0.0.2",
+				},
+			},
+			new: []v1beta2.Leader{
+				{
+					NodeName: "node1",
+					Address:  "10.0.0.1",
+				},
+			},
+			expected: true,
+		},
+		{
+			name: "new and old are different",
+			old: []v1beta2.Leader{
+				{
+					NodeName: "node1",
+					Address:  "10.0.0.1",
+				},
+				{
+					NodeName: "node2",
+					Address:  "10.0.0.2",
+				},
+			},
+			new: []v1beta2.Leader{
+				{
+					NodeName: "node1",
+					Address:  "10.0.0.3",
+				},
+				{
+					NodeName: "node2",
+					Address:  "10.0.0.4",
+				},
+			},
+			expected: true,
+		},
+
+		{
+			name: "old and new are the same but in different order",
+			old: []v1beta2.Leader{
+				{
+					NodeName: "node2",
+					Address:  "10.0.0.2",
+				},
+				{
+					NodeName: "node1",
+					Address:  "10.0.0.1",
+				},
+			},
+			new: []v1beta2.Leader{
+				{
+					NodeName: "node1",
+					Address:  "10.0.0.1",
+				},
+				{
+					NodeName: "node2",
+					Address:  "10.0.0.2",
+				},
+			},
+			expected: false,
+		},
+		{
+			name: "old is nil",
+			old:  nil,
+			new: []v1beta2.Leader{
+				{
+					NodeName: "node1",
+					Address:  "10.0.0.1",
+				},
+				{
+					NodeName: "node2",
+					Address:  "10.0.0.2",
+				},
+			},
+			expected: true,
+		},
+		{
+			name: "new is nil",
+			old: []v1beta2.Leader{
+				{
+					NodeName: "node1",
+					Address:  "10.0.0.1",
+				},
+				{
+					NodeName: "node2",
+					Address:  "10.0.0.2",
+				},
+			},
+			new:      nil,
+			expected: true,
+		},
+	}
+
+	for _, tc := range tests {
+		t.Run(tc.name, func(t *testing.T) {
+			actual := nodepool.HasSliceContentChanged(tc.old, tc.new)
+			assert.Equal(t, tc.expected, actual)
+		})
+	}
+}
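
Reviewer notes on the new util and the ConfigMap contract, with a small consumer-side sketch.

HasSliceContentChanged compares the two slices as sets: it checks equal length plus membership of each new element in the old set. Two slices of equal length that differ only in element multiplicity (for example [a, a, b] vs [a, b, b]) are therefore reported as unchanged. That is safe for leader endpoints and pool scope metadata, which are unique per pool, but worth keeping in mind if the helper is reused for other element types.

The sketch below shows how a consumer might decode the Data payload that reconcileHubLeaderConfig publishes. Only the keys ("leaders", "pool-scoped-metadata", "interconnectivity") and their encodings are taken from this PR; the LeaderInfo type and ParseLeaderConfigMapData helper are hypothetical names for illustration, not part of the change set.

package main

import (
	"fmt"
	"strconv"
	"strings"
)

// LeaderInfo is a hypothetical decoded view of one "nodeName/address" entry
// from the "leaders" key.
type LeaderInfo struct {
	NodeName string
	Address  string
}

// ParseLeaderConfigMapData decodes a leader ConfigMap's Data as written by
// reconcileHubLeaderConfig: comma-separated "node/address" pairs under
// "leaders", comma-separated "group/version/kind" triples under
// "pool-scoped-metadata", and a strconv-formatted bool under
// "interconnectivity".
func ParseLeaderConfigMapData(data map[string]string) ([]LeaderInfo, []string, bool, error) {
	var leaders []LeaderInfo
	if raw := data["leaders"]; raw != "" {
		for _, entry := range strings.Split(raw, ",") {
			// Node names cannot contain "/", so the first slash separates
			// the node name from its address.
			nodeName, addr, ok := strings.Cut(entry, "/")
			if !ok {
				return nil, nil, false, fmt.Errorf("malformed leader entry %q", entry)
			}
			leaders = append(leaders, LeaderInfo{NodeName: nodeName, Address: addr})
		}
	}

	var poolScopeMetadata []string
	if raw := data["pool-scoped-metadata"]; raw != "" {
		poolScopeMetadata = strings.Split(raw, ",")
	}

	interConnectivity, err := strconv.ParseBool(data["interconnectivity"])
	if err != nil {
		return nil, nil, false, fmt.Errorf("malformed interconnectivity value: %w", err)
	}

	return leaders, poolScopeMetadata, interConnectivity, nil
}

func main() {
	// Mirrors the "multiple endpoints" fixture in hubleaderconfig_controller_test.go.
	data := map[string]string{
		"leaders":              "node1/10.0.0.1,node2/10.0.0.2",
		"pool-scoped-metadata": "core/v1/Service,discovery.k8s.io/v1/EndpointSlice",
		"interconnectivity":    "true",
	}
	leaders, gvks, interConnectivity, err := ParseLeaderConfigMapData(data)
	fmt.Println(leaders, gvks, interConnectivity, err)
}

Because the values are flat, sorted, comma-joined strings, the payload stays stable across reconciles (see the slice.SortStrings calls above), so consumers can cheaply detect changes by comparing the raw Data map.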