From 5d11aa7145a9bc415142a790a38924e4a7433a48 Mon Sep 17 00:00:00 2001
From: Danil-Grigorev
Date: Fri, 8 Nov 2024 18:22:08 +0100
Subject: [PATCH] Add e2e test to cover cross-namespace provisioning

- Prevent duplicate CC creation in template processing
- Test runtime extension integration
- Add changes to CC rebase e2e test
- Add a note about CLUSTER_CLASS_NAMESPACE to the clusterctl contract

Signed-off-by: Danil-Grigorev
Co-authored-by: Christian Schlotter
---
 cmd/clusterctl/client/clusterclass.go        |  20 +-
 cmd/clusterctl/client/clusterclass_test.go   |  46 +++-
 cmd/clusterctl/client/config.go              |   2 +-
 cmd/clusterctl/client/repository/template.go |   6 +-
 .../providers/contracts/clusterctl.md        |   6 +
 test/e2e/cluster_upgrade_runtimesdk.go       |  31 ++-
 test/e2e/cluster_upgrade_runtimesdk_test.go  |  34 +++
 test/e2e/clusterclass_changes.go             | 199 +++++++++++++++---
 .../main/bases/cluster-with-topology.yaml    |   1 +
 .../cluster-runtimesdk.yaml                  |   1 +
 test/e2e/quick_start.go                      |  35 ++-
 .../clusterclass-quick-start-runtimesdk.yaml |   6 +-
 test/framework/ownerreference_helpers.go     |   2 +-
 13 files changed, 329 insertions(+), 60 deletions(-)

diff --git a/cmd/clusterctl/client/clusterclass.go b/cmd/clusterctl/client/clusterclass.go
index 58123d83caeb..4bcb0b379fbe 100644
--- a/cmd/clusterctl/client/clusterclass.go
+++ b/cmd/clusterctl/client/clusterclass.go
@@ -23,6 +23,8 @@ import (
 	"github.com/pkg/errors"
 	apierrors "k8s.io/apimachinery/pkg/api/errors"
 	"k8s.io/apimachinery/pkg/apis/meta/v1/unstructured"
+	"k8s.io/apimachinery/pkg/types"
+	"k8s.io/klog/v2"
 	"sigs.k8s.io/controller-runtime/pkg/client"

 	clusterv1 "sigs.k8s.io/cluster-api/api/v1beta1"
@@ -34,7 +36,7 @@ import (
 // addClusterClassIfMissing returns a Template that includes the base template and adds any cluster class definitions that
 // are referenced in the template. If the cluster class referenced already exists in the cluster it is not added to the
 // template.
-func addClusterClassIfMissing(ctx context.Context, template Template, clusterClassClient repository.ClusterClassClient, clusterClient cluster.Client, targetNamespace string, listVariablesOnly bool) (Template, error) {
+func addClusterClassIfMissing(ctx context.Context, template Template, clusterClassClient repository.ClusterClassClient, clusterClient cluster.Client, listVariablesOnly bool) (Template, error) {
 	classes, err := clusterClassNamesFromTemplate(template)
 	if err != nil {
 		return nil, err
 	}
@@ -44,7 +46,7 @@ func addClusterClassIfMissing(ctx context.Context, template Template, clusterCla
 		return template, nil
 	}

-	clusterClassesTemplate, err := fetchMissingClusterClassTemplates(ctx, clusterClassClient, clusterClient, classes, targetNamespace, listVariablesOnly)
+	clusterClassesTemplate, err := fetchMissingClusterClassTemplates(ctx, clusterClassClient, clusterClient, classes, listVariablesOnly)
 	if err != nil {
 		return nil, err
 	}
@@ -62,8 +64,8 @@ func addClusterClassIfMissing(ctx context.Context, template Template, clusterCla
 // clusterClassNamesFromTemplate returns the list of ClusterClasses referenced
 // by clusters defined in the template. If no clusters are defined in the template
 // or if no cluster uses a cluster class it returns an empty list.
-func clusterClassNamesFromTemplate(template Template) ([]string, error) {
-	classes := []string{}
+func clusterClassNamesFromTemplate(template Template) ([]types.NamespacedName, error) {
+	classes := []types.NamespacedName{}

 	// loop through all the objects and if the object is a cluster
 	// check and see if cluster.spec.topology.class is defined.
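The change above from `[]string` to `[]types.NamespacedName` is what lets clusterctl distinguish ClusterClasses that share a name but live in different namespaces. A minimal, self-contained sketch of the idea (illustrative only, not part of the patch; the namespaces are invented):

```go
package main

import (
	"fmt"

	"k8s.io/apimachinery/pkg/types"
)

func main() {
	// With []string, both entries below would collapse to the single key "dev".
	// With types.NamespacedName, ns1/dev and ns2/dev stay distinct, so each
	// ClusterClass can be fetched from, and checked in, its own namespace.
	classes := []types.NamespacedName{
		{Namespace: "ns1", Name: "dev"},
		{Namespace: "ns2", Name: "dev"},
	}
	seen := map[types.NamespacedName]struct{}{}
	for _, class := range classes {
		if _, ok := seen[class]; ok {
			continue
		}
		seen[class] = struct{}{}
		fmt.Printf("would fetch ClusterClass %s\n", class) // prints ns1/dev, then ns2/dev
	}
}
```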
@@ -80,14 +82,14 @@ func clusterClassNamesFromTemplate(template Template) ([]string, error) {
 		if cluster.Spec.Topology == nil {
 			continue
 		}
-		classes = append(classes, cluster.GetClassKey().Name)
+		classes = append(classes, cluster.GetClassKey())
 	}
 	return classes, nil
 }

 // fetchMissingClusterClassTemplates returns a list of templates for ClusterClasses that do not yet exist
 // in the cluster. If the cluster is not initialized, all the ClusterClasses are added.
-func fetchMissingClusterClassTemplates(ctx context.Context, clusterClassClient repository.ClusterClassClient, clusterClient cluster.Client, classes []string, targetNamespace string, listVariablesOnly bool) (Template, error) {
+func fetchMissingClusterClassTemplates(ctx context.Context, clusterClassClient repository.ClusterClassClient, clusterClient cluster.Client, classes []types.NamespacedName, listVariablesOnly bool) (Template, error) {
 	// first check if the cluster is initialized.
 	// If it is initialized:
 	//    For every ClusterClass check if it already exists in the cluster.
@@ -118,7 +120,7 @@ func fetchMissingClusterClassTemplates(ctx context.Context, clusterClassClient r
 	templates := []repository.Template{}
 	for _, class := range classes {
 		if clusterInitialized {
-			exists, err := clusterClassExists(ctx, c, class, targetNamespace)
+			exists, err := clusterClassExists(ctx, c, class.Name, class.Namespace)
 			if err != nil {
 				return nil, err
 			}
@@ -128,7 +130,7 @@ func fetchMissingClusterClassTemplates(ctx context.Context, clusterClassClient r
 		}
 		// The cluster is either not initialized or the ClusterClass does not yet exist in the cluster.
 		// Fetch the cluster class to install.
-		clusterClassTemplate, err := clusterClassClient.Get(ctx, class, targetNamespace, listVariablesOnly)
+		clusterClassTemplate, err := clusterClassClient.Get(ctx, class.Name, class.Namespace, listVariablesOnly)
 		if err != nil {
 			return nil, errors.Wrapf(err, "failed to get the cluster class template for %q", class)
 		}
@@ -142,7 +144,7 @@ func fetchMissingClusterClassTemplates(ctx context.Context, clusterClassClient r
 			if exists, err := objExists(ctx, c, obj); err != nil {
 				return nil, err
 			} else if exists {
-				return nil, fmt.Errorf("%s(%s) already exists in the cluster", obj.GetName(), obj.GetObjectKind().GroupVersionKind())
+				return nil, fmt.Errorf("%s(%s) already exists in the cluster", klog.KObj(&obj), obj.GetObjectKind().GroupVersionKind())
 			}
 		}
 	}
diff --git a/cmd/clusterctl/client/clusterclass_test.go b/cmd/clusterctl/client/clusterclass_test.go
index 912df08a9bdd..108a3a269c48 100644
--- a/cmd/clusterctl/client/clusterclass_test.go
+++ b/cmd/clusterctl/client/clusterclass_test.go
@@ -100,6 +100,7 @@ func TestAddClusterClassIfMissing(t *testing.T) {
 		objs                        []client.Object
 		clusterClassTemplateContent []byte
 		targetNamespace             string
+		clusterClassNamespace       string
 		listVariablesOnly           bool
 		wantClusterClassInTemplate  bool
 		wantError                   bool
@@ -114,6 +115,28 @@ func TestAddClusterClassIfMissing(t *testing.T) {
 			wantClusterClassInTemplate:  true,
 			wantError:                   false,
 		},
+		{
+			name:                        "should add the cluster class from a different namespace to the template if cluster is not initialized",
+			clusterInitialized:          false,
+			objs:                        []client.Object{},
+			targetNamespace:             "ns1",
+			clusterClassNamespace:       "ns2",
+			clusterClassTemplateContent: clusterClassYAML("ns2", "dev"),
+			listVariablesOnly:           false,
+			wantClusterClassInTemplate:  true,
+			wantError:                   false,
+		},
+		{
+			name:                        "should add the cluster class from the same explicitly specified namespace to the template if cluster is not initialized",
clusterInitialized: false, + objs: []client.Object{}, + targetNamespace: "ns1", + clusterClassNamespace: "ns1", + clusterClassTemplateContent: clusterClassYAML("ns1", "dev"), + listVariablesOnly: false, + wantClusterClassInTemplate: true, + wantError: false, + }, { name: "should add the cluster class to the template if cluster is initialized and cluster class is not installed", clusterInitialized: true, @@ -189,17 +212,21 @@ func TestAddClusterClassIfMissing(t *testing.T) { clusterClassClient := repository1.ClusterClasses("v1.0.0") - clusterWithTopology := []byte(fmt.Sprintf("apiVersion: %s\n", clusterv1.GroupVersion.String()) + + clusterWithTopology := fmt.Sprintf("apiVersion: %s\n", clusterv1.GroupVersion.String()) + "kind: Cluster\n" + "metadata:\n" + " name: cluster-dev\n" + fmt.Sprintf(" namespace: %s\n", tt.targetNamespace) + "spec:\n" + " topology:\n" + - " class: dev") + " class: dev" + + if tt.clusterClassNamespace != "" { + clusterWithTopology = fmt.Sprintf("%s\n classNamespace: %s", clusterWithTopology, tt.clusterClassNamespace) + } baseTemplate, err := repository.NewTemplate(repository.TemplateInput{ - RawArtifact: clusterWithTopology, + RawArtifact: []byte(clusterWithTopology), ConfigVariablesClient: test.NewFakeVariableClient(), Processor: yaml.NewSimpleProcessor(), TargetNamespace: tt.targetNamespace, @@ -210,13 +237,22 @@ func TestAddClusterClassIfMissing(t *testing.T) { } g := NewWithT(t) - template, err := addClusterClassIfMissing(ctx, baseTemplate, clusterClassClient, cluster, tt.targetNamespace, tt.listVariablesOnly) + template, err := addClusterClassIfMissing(ctx, baseTemplate, clusterClassClient, cluster, tt.listVariablesOnly) if tt.wantError { g.Expect(err).To(HaveOccurred()) } else { if tt.wantClusterClassInTemplate { - g.Expect(template.Objs()).To(ContainElement(MatchClusterClass("dev", tt.targetNamespace))) + if tt.clusterClassNamespace == tt.targetNamespace { + g.Expect(template.Objs()).To(ContainElement(MatchClusterClass("dev", tt.targetNamespace))) + } else if tt.clusterClassNamespace != "" { + g.Expect(template.Objs()).To(ContainElement(MatchClusterClass("dev", tt.clusterClassNamespace))) + g.Expect(template.Objs()).ToNot(ContainElement(MatchClusterClass("dev", tt.targetNamespace))) + } else { + g.Expect(template.Objs()).To(ContainElement(MatchClusterClass("dev", tt.targetNamespace))) + g.Expect(template.Objs()).ToNot(ContainElement(MatchClusterClass("dev", tt.clusterClassNamespace))) + } } else { + g.Expect(template.Objs()).NotTo(ContainElement(MatchClusterClass("dev", tt.clusterClassNamespace))) g.Expect(template.Objs()).NotTo(ContainElement(MatchClusterClass("dev", tt.targetNamespace))) } } diff --git a/cmd/clusterctl/client/config.go b/cmd/clusterctl/client/config.go index cf915fa0d230..22ab04ac701e 100644 --- a/cmd/clusterctl/client/config.go +++ b/cmd/clusterctl/client/config.go @@ -344,7 +344,7 @@ func (c *clusterctlClient) getTemplateFromRepository(ctx context.Context, cluste clusterClassClient := repo.ClusterClasses(version) - template, err = addClusterClassIfMissing(ctx, template, clusterClassClient, cluster, targetNamespace, listVariablesOnly) + template, err = addClusterClassIfMissing(ctx, template, clusterClassClient, cluster, listVariablesOnly) if err != nil { return nil, err } diff --git a/cmd/clusterctl/client/repository/template.go b/cmd/clusterctl/client/repository/template.go index 5ef36dabd5bc..942a4705394c 100644 --- a/cmd/clusterctl/client/repository/template.go +++ b/cmd/clusterctl/client/repository/template.go @@ -17,8 +17,6 @@ 
limitations under the License.

 package repository

 import (
-	"fmt"
-
 	"github.com/pkg/errors"
 	"k8s.io/apimachinery/pkg/apis/meta/v1/unstructured"
 	"k8s.io/apimachinery/pkg/util/sets"
@@ -144,7 +142,7 @@ func NewTemplate(input TemplateInput) (Template, error) {
 // MergeTemplates merges the provided Templates into one Template.
 // Notes on the merge operation:
-// - The merge operation returns an error if all the templates do not have the same TargetNamespace.
+// - The merge operation sets an empty targetNamespace if the templates do not all share the same TargetNamespace.
 // - The Variables of the resulting template is a union of all Variables in the templates.
 // - The default value is picked from the first template that defines it.
 //   The defaults of the same variable in the subsequent templates will be ignored.
@@ -173,7 +171,7 @@ func MergeTemplates(templates ...Template) (Template, error) {
 		}

 		if merged.targetNamespace != tmpl.TargetNamespace() {
-			return nil, fmt.Errorf("cannot merge templates with different targetNamespaces")
+			merged.targetNamespace = ""
 		}

 		merged.objs = append(merged.objs, tmpl.Objs()...)
diff --git a/docs/book/src/developer/providers/contracts/clusterctl.md b/docs/book/src/developer/providers/contracts/clusterctl.md
index 60d3ac2f1928..68ca06e00b1b 100644
--- a/docs/book/src/developer/providers/contracts/clusterctl.md
+++ b/docs/book/src/developer/providers/contracts/clusterctl.md
@@ -416,6 +416,12 @@ ClusterClass definitions MUST be stored in the same location as the component YA
 in the Cluster template; Cluster template files using a ClusterClass are usually simpler because they are no longer
 required to have all the templates.

+Additionally, the namespace of the ClusterClass can differ from the namespace of the Cluster. This requires setting
+the Cluster.spec.topology.classNamespace field in the Cluster template;
+a Cluster template may define classNamespace as `classNamespace: ${CLUSTER_CLASS_NAMESPACE:=""}`, which allows the
+namespace of the referenced ClusterClass to be optionally specified via an environment variable. An empty or missing
+value defaults to the namespace of the Cluster.
+
 Each provider should create user facing documentation with the list of available ClusterClass definitions.

 #### Target namespace
diff --git a/test/e2e/cluster_upgrade_runtimesdk.go b/test/e2e/cluster_upgrade_runtimesdk.go
index 80fe78a0438b..6467c3b57c3d 100644
--- a/test/e2e/cluster_upgrade_runtimesdk.go
+++ b/test/e2e/cluster_upgrade_runtimesdk.go
@@ -102,6 +102,9 @@ type ClusterUpgradeWithRuntimeSDKSpecInput struct {
 	// ExtensionServiceName is the name of the service to configure in the test-namespace scoped ExtensionConfig.
 	ExtensionServiceName string
+
+	// DeployClusterClassInSeparateNamespace defines if the ClusterClass should be deployed in a separate namespace.
+	DeployClusterClassInSeparateNamespace bool
 }

 // ClusterUpgradeWithRuntimeSDKSpec implements a spec that upgrades a cluster and runs the Kubernetes conformance suite.
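To make the clusterctl contract note above concrete, here is a sketch of a Cluster template that opts into cross-namespace ClusterClass resolution. `CLUSTER_NAME`, `NAMESPACE`, and `KUBERNETES_VERSION` are assumed placeholders in the style of the repository's test templates, and surrounding fields are elided:

```yaml
apiVersion: cluster.x-k8s.io/v1beta1
kind: Cluster
metadata:
  name: "${CLUSTER_NAME}"
  namespace: "${NAMESPACE}"
spec:
  topology:
    class: quick-start
    # When CLUSTER_CLASS_NAMESPACE is empty or unset, the ClusterClass is
    # looked up in the Cluster's own namespace.
    classNamespace: ${CLUSTER_CLASS_NAMESPACE:=""}
    version: "${KUBERNETES_VERSION}"
```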
@@ -116,9 +119,9 @@ func ClusterUpgradeWithRuntimeSDKSpec(ctx context.Context, inputGetter func() Cl ) var ( - input ClusterUpgradeWithRuntimeSDKSpecInput - namespace *corev1.Namespace - cancelWatches context.CancelFunc + input ClusterUpgradeWithRuntimeSDKSpecInput + namespace, clusterClassNamespace *corev1.Namespace + cancelWatches context.CancelFunc controlPlaneMachineCount int64 workerMachineCount int64 @@ -158,6 +161,10 @@ func ClusterUpgradeWithRuntimeSDKSpec(ctx context.Context, inputGetter func() Cl // Set up a Namespace where to host objects for this spec and create a watcher for the Namespace events. namespace, cancelWatches = framework.SetupSpecNamespace(ctx, specName, input.BootstrapClusterProxy, input.ArtifactFolder, input.PostNamespaceCreated) + if input.DeployClusterClassInSeparateNamespace { + clusterClassNamespace = framework.CreateNamespace(ctx, framework.CreateNamespaceInput{Creator: input.BootstrapClusterProxy.GetClient(), Name: fmt.Sprintf("%s-clusterclass", namespace.Name)}, "40s", "10s") + Expect(clusterClassNamespace).ToNot(BeNil(), "Failed to create namespace") + } clusterName = fmt.Sprintf("%s-%s", specName, util.RandomString(6)) clusterResources = new(clusterctl.ApplyClusterTemplateAndWaitResult) }) @@ -171,11 +178,16 @@ func ClusterUpgradeWithRuntimeSDKSpec(ctx context.Context, inputGetter func() Cl By("Deploy Test Extension ExtensionConfig") + namespaces := []string{namespace.Name} + if input.DeployClusterClassInSeparateNamespace { + namespaces = append(namespaces, clusterClassNamespace.Name) + } + // In this test we are defaulting all handlers to blocking because we expect the handlers to block the // cluster lifecycle by default. Setting defaultAllHandlersToBlocking to true enforces that the test-extension // automatically creates the ConfigMap with blocking preloaded responses. Expect(input.BootstrapClusterProxy.GetClient().Create(ctx, - extensionConfig(input.ExtensionConfigName, input.ExtensionServiceNamespace, input.ExtensionServiceName, true, namespace.Name))). + extensionConfig(input.ExtensionConfigName, input.ExtensionServiceNamespace, input.ExtensionServiceName, true, namespaces...))). To(Succeed(), "Failed to create the extension config") By("Creating a workload cluster; creation waits for BeforeClusterCreateHook to gate the operation") @@ -194,6 +206,9 @@ func ClusterUpgradeWithRuntimeSDKSpec(ctx context.Context, inputGetter func() Cl // This is used to template the name of the ExtensionConfig into the ClusterClass. 
"EXTENSION_CONFIG_NAME": input.ExtensionConfigName, } + if input.DeployClusterClassInSeparateNamespace { + variables["CLUSTER_CLASS_NAMESPACE"] = clusterClassNamespace.Name + } clusterctl.ApplyClusterTemplateAndWait(ctx, clusterctl.ApplyClusterTemplateAndWaitInput{ ClusterProxy: input.BootstrapClusterProxy, @@ -341,6 +356,14 @@ func ClusterUpgradeWithRuntimeSDKSpec(ctx context.Context, inputGetter func() Cl Deleter: input.BootstrapClusterProxy.GetClient(), Name: namespace.Name, }) + + if input.DeployClusterClassInSeparateNamespace { + Byf("Deleting namespace used for hosting the %q test spec ClusterClass", specName) + framework.DeleteNamespace(ctx, framework.DeleteNamespaceInput{ + Deleter: input.BootstrapClusterProxy.GetClient(), + Name: clusterClassNamespace.Name, + }) + } } cancelWatches() }) diff --git a/test/e2e/cluster_upgrade_runtimesdk_test.go b/test/e2e/cluster_upgrade_runtimesdk_test.go index 8cd5b930f2aa..a4d4de5f834c 100644 --- a/test/e2e/cluster_upgrade_runtimesdk_test.go +++ b/test/e2e/cluster_upgrade_runtimesdk_test.go @@ -60,3 +60,37 @@ var _ = Describe("When upgrading a workload cluster using ClusterClass with Runt } }) }) + +var _ = Describe("When upgrading a workload cluster using ClusterClass in a different NS with RuntimeSDK [ClusterClass]", Label("ClusterClass"), func() { + ClusterUpgradeWithRuntimeSDKSpec(ctx, func() ClusterUpgradeWithRuntimeSDKSpecInput { + version, err := semver.ParseTolerant(e2eConfig.GetVariable(KubernetesVersionUpgradeFrom)) + Expect(err).ToNot(HaveOccurred(), "Invalid argument, KUBERNETES_VERSION_UPGRADE_FROM is not a valid version") + if version.LT(semver.MustParse("1.24.0")) { + Fail("This test only supports upgrades from Kubernetes >= v1.24.0") + } + + return ClusterUpgradeWithRuntimeSDKSpecInput{ + E2EConfig: e2eConfig, + ClusterctlConfigPath: clusterctlConfigPath, + BootstrapClusterProxy: bootstrapClusterProxy, + ArtifactFolder: artifactFolder, + SkipCleanup: skipCleanup, + InfrastructureProvider: ptr.To("docker"), + PostUpgrade: func(proxy framework.ClusterProxy, namespace, clusterName string) { + // This check ensures that the resourceVersions are stable, i.e. it verifies there are no + // continuous reconciles when everything should be stable. + framework.ValidateResourceVersionStable(ctx, proxy, namespace, clusterctlcluster.FilterClusterObjectsWithNameFilter(clusterName)) + }, + // "upgrades" is the same as the "topology" flavor but with an additional MachinePool. + Flavor: ptr.To("upgrades-runtimesdk"), + DeployClusterClassInSeparateNamespace: true, + // The runtime extension gets deployed to the test-extension-system namespace and is exposed + // by the test-extension-webhook-service. + // The below values are used when creating the cluster-wide ExtensionConfig to refer + // the actual service. + ExtensionServiceNamespace: "test-extension-system", + ExtensionServiceName: "test-extension-webhook-service", + ExtensionConfigName: "k8s-upgrade-with-runtimesdk-cross-ns", + } + }) +}) diff --git a/test/e2e/clusterclass_changes.go b/test/e2e/clusterclass_changes.go index 61979966792a..0bb29041dceb 100644 --- a/test/e2e/clusterclass_changes.go +++ b/test/e2e/clusterclass_changes.go @@ -135,11 +135,11 @@ type ClusterClassChangesSpecInput struct { // indirect test coverage of this from other tests as well. 
func ClusterClassChangesSpec(ctx context.Context, inputGetter func() ClusterClassChangesSpecInput) { var ( - specName = "clusterclass-changes" - input ClusterClassChangesSpecInput - namespace *corev1.Namespace - cancelWatches context.CancelFunc - clusterResources *clusterctl.ApplyClusterTemplateAndWaitResult + specName = "clusterclass-changes" + input ClusterClassChangesSpecInput + namespace, clusterClassNamespace *corev1.Namespace + cancelWatches context.CancelFunc + clusterResources *clusterctl.ApplyClusterTemplateAndWaitResult ) BeforeEach(func() { @@ -155,6 +155,8 @@ func ClusterClassChangesSpec(ctx context.Context, inputGetter func() ClusterClas // Set up a Namespace where to host objects for this spec and create a watcher for the namespace events. namespace, cancelWatches = framework.SetupSpecNamespace(ctx, specName, input.BootstrapClusterProxy, input.ArtifactFolder, input.PostNamespaceCreated) + clusterClassNamespace = framework.CreateNamespace(ctx, framework.CreateNamespaceInput{Creator: input.BootstrapClusterProxy.GetClient(), Name: fmt.Sprintf("%s-clusterclass", namespace.Name)}, "40s", "10s") + Expect(clusterClassNamespace).ToNot(BeNil(), "Failed to create namespace") clusterResources = new(clusterctl.ApplyClusterTemplateAndWaitResult) }) @@ -184,10 +186,21 @@ func ClusterClassChangesSpec(ctx context.Context, inputGetter func() ClusterClas WaitForMachineDeployments: input.E2EConfig.GetIntervals(specName, "wait-worker-nodes"), }, clusterResources) + originalClusterClassState := clusterResources.ClusterClass.DeepCopy() + + By("Rebasing the Cluster to a ClusterClass with a modified label for MachineDeployments and wait for changes to be applied to the MachineDeployment objects") + rebasedClusterClass := rebaseClusterClassAndWait(ctx, rebaseClusterClassAndWaitInput{ + ClusterProxy: input.BootstrapClusterProxy, + ClusterClass: clusterResources.ClusterClass, + Cluster: clusterResources.Cluster, + WaitForMachineDeployments: input.E2EConfig.GetIntervals(specName, "wait-worker-nodes"), + WaitForControlPlaneIntervals: input.E2EConfig.GetIntervals(specName, "wait-control-plane"), + }) + By("Modifying the control plane configuration in ClusterClass and wait for changes to be applied to the control plane object") modifyControlPlaneViaClusterClassAndWait(ctx, modifyClusterClassControlPlaneAndWaitInput{ ClusterProxy: input.BootstrapClusterProxy, - ClusterClass: clusterResources.ClusterClass, + ClusterClass: rebasedClusterClass, Cluster: clusterResources.Cluster, ModifyControlPlaneFields: input.ModifyControlPlaneFields, WaitForControlPlane: input.E2EConfig.GetIntervals(specName, "wait-control-plane"), @@ -196,7 +209,7 @@ func ClusterClassChangesSpec(ctx context.Context, inputGetter func() ClusterClas By("Modifying the MachineDeployment configuration in ClusterClass and wait for changes to be applied to the MachineDeployment objects") modifyMachineDeploymentViaClusterClassAndWait(ctx, modifyMachineDeploymentViaClusterClassAndWaitInput{ ClusterProxy: input.BootstrapClusterProxy, - ClusterClass: clusterResources.ClusterClass, + ClusterClass: rebasedClusterClass, Cluster: clusterResources.Cluster, ModifyBootstrapConfigTemplateFields: input.ModifyMachineDeploymentBootstrapConfigTemplateFields, ModifyInfrastructureMachineTemplateFields: input.ModifyMachineDeploymentInfrastructureMachineTemplateFields, @@ -206,19 +219,55 @@ func ClusterClassChangesSpec(ctx context.Context, inputGetter func() ClusterClas By("Modifying the MachinePool configuration in ClusterClass and wait for changes to be applied to 
the MachinePool objects") modifyMachinePoolViaClusterClassAndWait(ctx, modifyMachinePoolViaClusterClassAndWaitInput{ ClusterProxy: input.BootstrapClusterProxy, - ClusterClass: clusterResources.ClusterClass, + ClusterClass: rebasedClusterClass, Cluster: clusterResources.Cluster, ModifyBootstrapConfigTemplateFields: input.ModifyMachinePoolBootstrapConfigTemplateFields, ModifyInfrastructureMachinePoolTemplateFields: input.ModifyMachinePoolInfrastructureMachinePoolTemplateFields, WaitForMachinePools: input.E2EConfig.GetIntervals(specName, "wait-machine-pool-nodes"), }) - By("Rebasing the Cluster to a ClusterClass with a modified label for MachineDeployments and wait for changes to be applied to the MachineDeployment objects") - rebaseClusterClassAndWait(ctx, rebaseClusterClassAndWaitInput{ - ClusterProxy: input.BootstrapClusterProxy, - ClusterClass: clusterResources.ClusterClass, - Cluster: clusterResources.Cluster, - WaitForMachineDeployments: input.E2EConfig.GetIntervals(specName, "wait-worker-nodes"), + By("Rebasing the Cluster to a copy of original ClusterClass in a different namespace") + rebasedClusterClass = rebaseClusterClassAndWait(ctx, rebaseClusterClassAndWaitInput{ + ClusterProxy: input.BootstrapClusterProxy, + ClusterClassNamespace: clusterClassNamespace.Name, + ClusterClass: originalClusterClassState, + Cluster: clusterResources.Cluster, + // This rebase will revert the CP back to the original state. If we modified CP fields via + // modifyControlPlaneViaClusterClassAndWait this rebase will leads to changes in the CP. + ControlPlaneChanged: input.ModifyControlPlaneFields != nil, + WaitForMachineDeployments: input.E2EConfig.GetIntervals(specName, "wait-worker-nodes"), + WaitForControlPlaneIntervals: input.E2EConfig.GetIntervals(specName, "wait-control-plane"), + }) + + By("Performing modifications on the referenced ClusterClass templates in a different namespace") + + By("Modifying the control plane configuration in second ClusterClass and wait for changes to be applied to the control plane object") + modifyControlPlaneViaClusterClassAndWait(ctx, modifyClusterClassControlPlaneAndWaitInput{ + ClusterProxy: input.BootstrapClusterProxy, + ClusterClass: rebasedClusterClass, + Cluster: clusterResources.Cluster, + ModifyControlPlaneFields: input.ModifyControlPlaneFields, + WaitForControlPlane: input.E2EConfig.GetIntervals(specName, "wait-control-plane"), + }) + + By("Modifying the MachineDeployment configuration in ClusterClass and wait for changes to be applied to the MachineDeployment objects") + modifyMachineDeploymentViaClusterClassAndWait(ctx, modifyMachineDeploymentViaClusterClassAndWaitInput{ + ClusterProxy: input.BootstrapClusterProxy, + ClusterClass: rebasedClusterClass, + Cluster: clusterResources.Cluster, + ModifyBootstrapConfigTemplateFields: input.ModifyMachineDeploymentBootstrapConfigTemplateFields, + ModifyInfrastructureMachineTemplateFields: input.ModifyMachineDeploymentInfrastructureMachineTemplateFields, + WaitForMachineDeployments: input.E2EConfig.GetIntervals(specName, "wait-worker-nodes"), + }) + + By("Modifying the MachinePool configuration in ClusterClass and wait for changes to be applied to the MachinePool objects") + modifyMachinePoolViaClusterClassAndWait(ctx, modifyMachinePoolViaClusterClassAndWaitInput{ + ClusterProxy: input.BootstrapClusterProxy, + ClusterClass: rebasedClusterClass, + Cluster: clusterResources.Cluster, + ModifyBootstrapConfigTemplateFields: input.ModifyMachinePoolBootstrapConfigTemplateFields, + 
+			ModifyInfrastructureMachinePoolTemplateFields: input.ModifyMachinePoolInfrastructureMachinePoolTemplateFields,
+			WaitForMachinePools: input.E2EConfig.GetIntervals(specName, "wait-machine-pool-nodes"),
 		})

 		By("Deleting a MachineDeploymentTopology in the Cluster Topology and wait for associated MachineDeployment to be deleted")
@@ -233,6 +282,14 @@ func ClusterClassChangesSpec(ctx context.Context, inputGetter func() ClusterClas
 	AfterEach(func() {
 		// Dumps all the resources in the spec namespace, then cleanups the cluster object and the spec namespace itself.
 		framework.DumpSpecResourcesAndCleanup(ctx, specName, input.BootstrapClusterProxy, input.ArtifactFolder, namespace, cancelWatches, clusterResources.Cluster, input.E2EConfig.GetIntervals, input.SkipCleanup)
+
+		if !input.SkipCleanup {
+			Byf("Deleting namespace used for hosting the %q test spec ClusterClass", specName)
+			framework.DeleteNamespace(ctx, framework.DeleteNamespaceInput{
+				Deleter: input.BootstrapClusterProxy.GetClient(),
+				Name:    clusterClassNamespace.Name,
+			})
+		}
 	})
 }
@@ -300,6 +357,18 @@ func modifyControlPlaneViaClusterClassAndWait(ctx context.Context, input modifyC
 			g.Expect(ok).To(BeTrue(), fmt.Sprintf("failed to get field %q", fieldPath))
 			g.Expect(currentValue).To(Equal(expectedValue), fmt.Sprintf("field %q should be equal", fieldPath))
 		}
+
+		// Ensure KCP recognized the change and finished scaling.
+		// Note: This ensures we do not hit https://github.com/kubernetes-sigs/cluster-api/issues/11772
+		observedGeneration, ok, err := unstructured.NestedInt64(controlPlane.Object, "status", "observedGeneration")
+		g.Expect(err).ToNot(HaveOccurred())
+		if ok {
+			g.Expect(controlPlane.GetGeneration()).To(BeComparableTo(observedGeneration))
+		}
+		scaling, err := contract.ControlPlane().IsScaling(controlPlane)
+		g.Expect(err).ToNot(HaveOccurred())
+		g.Expect(scaling).To(BeFalse())
+
 		return nil
 	}, input.WaitForControlPlane...).Should(Succeed())
 }
@@ -660,16 +729,19 @@
 // rebaseClusterClassAndWaitInput is the input type for rebaseClusterClassAndWait.
 type rebaseClusterClassAndWaitInput struct {
-	ClusterProxy              framework.ClusterProxy
-	ClusterClass              *clusterv1.ClusterClass
-	Cluster                   *clusterv1.Cluster
-	WaitForMachineDeployments []interface{}
+	ClusterProxy                 framework.ClusterProxy
+	ClusterClass                 *clusterv1.ClusterClass
+	ClusterClassNamespace        string
+	Cluster                      *clusterv1.Cluster
+	ControlPlaneChanged          bool
+	WaitForMachineDeployments    []interface{}
+	WaitForControlPlaneIntervals []interface{}
 }

 // rebaseClusterClassAndWait rebases the cluster to a copy of the ClusterClass with different worker labels
 // and waits until the changes are rolled out to the MachineDeployments of the Cluster.
 // NOTE: This helper is really specific to this test, so we are keeping this private vs. adding it to the framework.
-func rebaseClusterClassAndWait(ctx context.Context, input rebaseClusterClassAndWaitInput) {
+func rebaseClusterClassAndWait(ctx context.Context, input rebaseClusterClassAndWaitInput) *clusterv1.ClusterClass {
 	Expect(ctx).NotTo(BeNil(), "ctx is required for RebaseClusterClassAndWait")
 	Expect(input.ClusterProxy).ToNot(BeNil(), "Invalid argument. input.ClusterProxy can't be nil when calling RebaseClusterClassAndWait")
 	Expect(input.ClusterClass).ToNot(BeNil(), "Invalid argument. 
input.ClusterClass can't be nil when calling RebaseClusterClassAndWait") @@ -679,11 +751,18 @@ func rebaseClusterClassAndWait(ctx context.Context, input rebaseClusterClassAndW var testWorkerLabelName = "rebase-diff" + sourceClusterClass := input.ClusterClass.DeepCopy() + Expect(mgmtClient.Get(ctx, client.ObjectKeyFromObject(sourceClusterClass), sourceClusterClass)).To(Succeed()) + // Create a new ClusterClass with a new name and the new worker label set. - newClusterClass := input.ClusterClass.DeepCopy() + newClusterClass := sourceClusterClass.DeepCopy() newClusterClassName := fmt.Sprintf("%s-%s", input.ClusterClass.Name, util.RandomString(6)) newClusterClass.SetName(newClusterClassName) + if input.ClusterClassNamespace != "" { + newClusterClass.SetNamespace(input.ClusterClassNamespace) + } newClusterClass.SetResourceVersion("") + for i, mdClass := range newClusterClass.Spec.Workers.MachineDeployments { if mdClass.Template.Metadata.Labels == nil { mdClass.Template.Metadata.Labels = map[string]string{} @@ -691,6 +770,24 @@ func rebaseClusterClassAndWait(ctx context.Context, input rebaseClusterClassAndW mdClass.Template.Metadata.Labels[testWorkerLabelName] = mdClass.Class newClusterClass.Spec.Workers.MachineDeployments[i] = mdClass } + + // Copy ClusterClass templates to the new namespace + for i, mdClass := range newClusterClass.Spec.Workers.MachineDeployments { + cloneRef(ctx, mgmtClient, mdClass.Template.Infrastructure.Ref, input.ClusterClassNamespace) + cloneRef(ctx, mgmtClient, mdClass.Template.Bootstrap.Ref, input.ClusterClassNamespace) + newClusterClass.Spec.Workers.MachineDeployments[i] = mdClass + } + + for i, mpClass := range newClusterClass.Spec.Workers.MachinePools { + cloneRef(ctx, mgmtClient, mpClass.Template.Infrastructure.Ref, input.ClusterClassNamespace) + cloneRef(ctx, mgmtClient, mpClass.Template.Bootstrap.Ref, input.ClusterClassNamespace) + newClusterClass.Spec.Workers.MachinePools[i] = mpClass + } + + cloneRef(ctx, mgmtClient, newClusterClass.Spec.ControlPlane.MachineInfrastructure.Ref, input.ClusterClassNamespace) + cloneRef(ctx, mgmtClient, newClusterClass.Spec.ControlPlane.Ref, input.ClusterClassNamespace) + cloneRef(ctx, mgmtClient, newClusterClass.Spec.Infrastructure.Ref, input.ClusterClassNamespace) + Expect(mgmtClient.Create(ctx, newClusterClass)).To(Succeed()) // Get the current ControlPlane, we will later verify that it has not changed. @@ -702,6 +799,7 @@ func rebaseClusterClassAndWait(ctx context.Context, input rebaseClusterClassAndW patchHelper, err := patch.NewHelper(input.Cluster, mgmtClient) Expect(err).ToNot(HaveOccurred()) input.Cluster.Spec.Topology.Class = newClusterClassName + input.Cluster.Spec.Topology.ClassNamespace = input.ClusterClassNamespace // We have to retry the patch. The ClusterClass was just created so the client cache in the // controller/webhook might not be aware of it yet. If the webhook is not aware of the ClusterClass // we get a "Cluster ... can't be validated. ClusterClass ... can not be retrieved" error. @@ -714,10 +812,10 @@ func rebaseClusterClassAndWait(ctx context.Context, input rebaseClusterClassAndW // NOTE: We only wait until the change is rolled out to the MachineDeployment objects and not to the worker machines // to speed up the test and focus the test on the ClusterClass feature. 
log.Logf("Waiting for MachineDeployment rollout for MachineDeploymentTopology %q (class %q) to complete.", mdTopology.Name, mdTopology.Class) - Eventually(func() error { + Eventually(func(g Gomega) error { // Get MachineDeployment for the current MachineDeploymentTopology. mdList := &clusterv1.MachineDeploymentList{} - Expect(mgmtClient.List(ctx, mdList, client.InNamespace(input.Cluster.Namespace), client.MatchingLabels{ + g.Expect(mgmtClient.List(ctx, mdList, client.InNamespace(input.Cluster.Namespace), client.MatchingLabels{ clusterv1.ClusterTopologyMachineDeploymentNameLabel: mdTopology.Name, })).To(Succeed()) if len(mdList.Items) != 1 { @@ -738,13 +836,58 @@ func rebaseClusterClassAndWait(ctx context.Context, input rebaseClusterClassAndW }, input.WaitForMachineDeployments...).Should(Succeed()) } - // Verify that the ControlPlane has not been changed. - // NOTE: MachineDeployments are rolled out before the ControlPlane. Thus, we know that the - // ControlPlane would have been updated by now, if there have been any changes. - afterControlPlane, err := external.Get(ctx, mgmtClient, controlPlaneRef) + if input.ControlPlaneChanged { + Eventually(func(g Gomega) { + afterControlPlane, err := external.Get(ctx, mgmtClient, controlPlaneRef) + g.Expect(err).ToNot(HaveOccurred()) + g.Expect(afterControlPlane.GetGeneration()).ToNot(Equal(beforeControlPlane.GetGeneration()), + "ControlPlane generation should be incremented during the rebase because ControlPlane expected to be changed.") + + // Ensure KCP recognized the change and finished scaling. + // Note: This is to ensure to not hit https://github.com/kubernetes-sigs/cluster-api/issues/11772 + observedGeneration, ok, err := unstructured.NestedInt64(controlPlane.Object, "status", "observedGeneration") + g.Expect(err).ToNot(HaveOccurred()) + if ok { + g.Expect(controlPlane.GetGeneration()).To(BeComparableTo(observedGeneration)) + } + scaling, err := contract.ControlPlane().IsScaling(controlPlane) + g.Expect(err).ToNot(HaveOccurred()) + g.Expect(scaling).To(BeFalse()) + }, input.WaitForControlPlaneIntervals...).Should(BeNil()) + } else { + Consistently(func(g Gomega) { + // Verify that the ControlPlane has not been changed. + // NOTE: MachineDeployments are rolled out before the ControlPlane. Thus, we know that the + // ControlPlane would have been updated by now, if there have been any changes. + afterControlPlane, err := external.Get(ctx, mgmtClient, controlPlaneRef) + g.Expect(err).ToNot(HaveOccurred()) + g.Expect(afterControlPlane.GetGeneration()).To(Equal(beforeControlPlane.GetGeneration()), + "ControlPlane generation should not be incremented during the rebase because ControlPlane should not be affected.") + }, "5s", "100ms").Should(Succeed()) + } + + return newClusterClass +} + +// cloneRef performs required modifications to avoid conflict, and create a copy of the referenced object, updating the ref in-place. 
+func cloneRef(ctx context.Context, cl client.Client, ref *corev1.ObjectReference, namespace string) {
+	if ref == nil {
+		return
+	}
+
+	template, err := external.Get(ctx, cl, ref)
 	Expect(err).ToNot(HaveOccurred())
-	Expect(afterControlPlane.GetGeneration()).To(Equal(beforeControlPlane.GetGeneration()),
-		"ControlPlane generation should not be incremented during the rebase because ControlPlane should not be affected.")
+	if namespace != "" {
+		template.SetNamespace(namespace)
+	} else {
+		template.SetGenerateName(template.GetName() + "-")
+		template.SetName("")
+	}
+	template.SetResourceVersion("")
+	template.SetOwnerReferences(nil)
+	Expect(cl.Create(ctx, template)).To(Succeed())
+
+	*ref = *external.GetObjectReference(template)
 }

 // deleteMachineDeploymentTopologyAndWaitInput is the input type for deleteMachineDeploymentTopologyAndWait.
diff --git a/test/e2e/data/infrastructure-docker/main/bases/cluster-with-topology.yaml b/test/e2e/data/infrastructure-docker/main/bases/cluster-with-topology.yaml
index 1fa907f3aab9..8e60cac08e51 100644
--- a/test/e2e/data/infrastructure-docker/main/bases/cluster-with-topology.yaml
+++ b/test/e2e/data/infrastructure-docker/main/bases/cluster-with-topology.yaml
@@ -14,6 +14,7 @@ spec:
   serviceDomain: '${DOCKER_SERVICE_DOMAIN}'
   topology:
     class: "quick-start"
+    classNamespace: '${CLUSTER_CLASS_NAMESPACE:-""}'
     version: "${KUBERNETES_VERSION}"
     controlPlane:
       metadata:
diff --git a/test/e2e/data/infrastructure-docker/main/cluster-template-upgrades-runtimesdk/cluster-runtimesdk.yaml b/test/e2e/data/infrastructure-docker/main/cluster-template-upgrades-runtimesdk/cluster-runtimesdk.yaml
index 004350219aa6..c27d9bb62220 100644
--- a/test/e2e/data/infrastructure-docker/main/cluster-template-upgrades-runtimesdk/cluster-runtimesdk.yaml
+++ b/test/e2e/data/infrastructure-docker/main/cluster-template-upgrades-runtimesdk/cluster-runtimesdk.yaml
@@ -14,6 +14,7 @@ spec:
   serviceDomain: '${DOCKER_SERVICE_DOMAIN}'
   topology:
     class: "quick-start-runtimesdk"
+    classNamespace: '${CLUSTER_CLASS_NAMESPACE:-""}'
     version: "${KUBERNETES_VERSION}"
     controlPlane:
       metadata: {}
diff --git a/test/e2e/quick_start.go b/test/e2e/quick_start.go
index 14d17c5d80ce..c53ab25e5bd0 100644
--- a/test/e2e/quick_start.go
+++ b/test/e2e/quick_start.go
@@ -44,6 +44,9 @@ type QuickStartSpecInput struct {
 	// If not set, a random one will be generated.
 	ClusterName *string

+	// DeployClusterClassInSeparateNamespace defines if the ClusterClass should be deployed in a separate namespace.
+	DeployClusterClassInSeparateNamespace bool
+
 	// InfrastructureProvider allows to specify the infrastructure provider to be used when looking for
 	// cluster templates.
 	// If not set, clusterctl will look at the infrastructure provider installed in the management cluster;
@@ -81,11 +84,12 @@ type QuickStartSpecInput struct {
 // NOTE: This test works with Clusters with and without ClusterClass.
func QuickStartSpec(ctx context.Context, inputGetter func() QuickStartSpecInput) { var ( - specName = "quick-start" - input QuickStartSpecInput - namespace *corev1.Namespace - cancelWatches context.CancelFunc - clusterResources *clusterctl.ApplyClusterTemplateAndWaitResult + specName = "quick-start" + input QuickStartSpecInput + namespace *corev1.Namespace + clusterClassNamespace *corev1.Namespace + cancelWatches context.CancelFunc + clusterResources *clusterctl.ApplyClusterTemplateAndWaitResult ) BeforeEach(func() { @@ -100,6 +104,12 @@ func QuickStartSpec(ctx context.Context, inputGetter func() QuickStartSpecInput) // Setup a Namespace where to host objects for this spec and create a watcher for the namespace events. namespace, cancelWatches = framework.SetupSpecNamespace(ctx, specName, input.BootstrapClusterProxy, input.ArtifactFolder, input.PostNamespaceCreated) + + if input.DeployClusterClassInSeparateNamespace { + clusterClassNamespace = framework.CreateNamespace(ctx, framework.CreateNamespaceInput{Creator: input.BootstrapClusterProxy.GetClient(), Name: fmt.Sprintf("%s-clusterclass", namespace.Name)}, "40s", "10s") + Expect(clusterClassNamespace).ToNot(BeNil(), "Failed to create namespace") + } + clusterResources = new(clusterctl.ApplyClusterTemplateAndWaitResult) }) @@ -130,11 +140,19 @@ func QuickStartSpec(ctx context.Context, inputGetter func() QuickStartSpecInput) if input.ClusterName != nil { clusterName = *input.ClusterName } + + variables := map[string]string{} + if input.DeployClusterClassInSeparateNamespace { + variables["CLUSTER_CLASS_NAMESPACE"] = clusterClassNamespace.Name + By("Creating a cluster referencing a ClusterClass from another namespace") + } + clusterctl.ApplyClusterTemplateAndWait(ctx, clusterctl.ApplyClusterTemplateAndWaitInput{ ClusterProxy: input.BootstrapClusterProxy, ConfigCluster: clusterctl.ConfigClusterInput{ LogFolder: filepath.Join(input.ArtifactFolder, "clusters", input.BootstrapClusterProxy.GetName()), ClusterctlConfigPath: input.ClusterctlConfigPath, + ClusterctlVariables: variables, KubeconfigPath: input.BootstrapClusterProxy.GetKubeconfigPath(), InfrastructureProvider: infrastructureProvider, Flavor: flavor, @@ -154,11 +172,18 @@ func QuickStartSpec(ctx context.Context, inputGetter func() QuickStartSpecInput) } }, }, clusterResources) + By("PASSED!") }) AfterEach(func() { // Dumps all the resources in the spec namespace, then cleanups the cluster object and the spec namespace itself. 
framework.DumpSpecResourcesAndCleanup(ctx, specName, input.BootstrapClusterProxy, input.ArtifactFolder, namespace, cancelWatches, clusterResources.Cluster, input.E2EConfig.GetIntervals, input.SkipCleanup) + if input.DeployClusterClassInSeparateNamespace && !input.SkipCleanup { + framework.DeleteNamespace(ctx, framework.DeleteNamespaceInput{ + Deleter: input.BootstrapClusterProxy.GetClient(), + Name: clusterClassNamespace.Name, + }) + } }) } diff --git a/test/extension/handlers/topologymutation/testdata/clusterclass-quick-start-runtimesdk.yaml b/test/extension/handlers/topologymutation/testdata/clusterclass-quick-start-runtimesdk.yaml index 789d375a662a..207d585c8ca2 100644 --- a/test/extension/handlers/topologymutation/testdata/clusterclass-quick-start-runtimesdk.yaml +++ b/test/extension/handlers/topologymutation/testdata/clusterclass-quick-start-runtimesdk.yaml @@ -54,9 +54,9 @@ spec: patches: - name: test-patch external: - generateExtension: generate-patches.k8s-upgrade-with-runtimesdk - validateExtension: validate-topology.k8s-upgrade-with-runtimesdk - discoverVariablesExtension: discover-variables.k8s-upgrade-with-runtimesdk + generateExtension: generate-patches.${EXTENSION_CONFIG_NAME:-"k8s-upgrade-with-runtimesdk"} + validateExtension: validate-topology.${EXTENSION_CONFIG_NAME:-"k8s-upgrade-with-runtimesdk"} + discoverVariablesExtension: discover-variables.${EXTENSION_CONFIG_NAME:-"k8s-upgrade-with-runtimesdk"} --- apiVersion: infrastructure.cluster.x-k8s.io/v1beta1 kind: DockerClusterTemplate diff --git a/test/framework/ownerreference_helpers.go b/test/framework/ownerreference_helpers.go index e8625849246a..79132f8e4f62 100644 --- a/test/framework/ownerreference_helpers.go +++ b/test/framework/ownerreference_helpers.go @@ -394,7 +394,7 @@ func forceClusterClassReconcile(ctx context.Context, cli client.Client, clusterK if cluster.Spec.Topology != nil { class := &clusterv1.ClusterClass{} - Expect(cli.Get(ctx, client.ObjectKey{Namespace: clusterKey.Namespace, Name: cluster.Spec.Topology.Class}, class)).To(Succeed()) + Expect(cli.Get(ctx, cluster.GetClassKey(), class)).To(Succeed()) annotationPatch := client.RawPatch(types.MergePatchType, []byte(fmt.Sprintf("{\"metadata\":{\"annotations\":{\"cluster.x-k8s.io/modifiedAt\":\"%v\"}}}", time.Now().Format(time.RFC3339)))) Expect(cli.Patch(ctx, class, annotationPatch)).To(Succeed()) }
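Several call sites in this patch, from the clusterctl template processing down to the `forceClusterClassReconcile` helper above, rely on `cluster.GetClassKey()`. A self-contained sketch of the assumed resolution rule, using simplified stand-in types rather than the real API structs: the ClusterClass is looked up in `spec.topology.classNamespace` when set, and in the Cluster's own namespace otherwise.

```go
package main

import (
	"fmt"

	"k8s.io/apimachinery/pkg/types"
)

// topology mirrors the relevant part of Cluster.spec.topology (simplified).
type topology struct {
	Class          string
	ClassNamespace string
}

// cluster mirrors the relevant part of a Cluster object (simplified).
type cluster struct {
	Namespace string
	Topology  *topology
}

// GetClassKey resolves the ClusterClass reference: classNamespace wins when
// set, otherwise the Cluster's own namespace is used (assumed semantics,
// not copied from the upstream implementation).
func (c *cluster) GetClassKey() types.NamespacedName {
	if c.Topology == nil {
		return types.NamespacedName{}
	}
	namespace := c.Topology.ClassNamespace
	if namespace == "" {
		namespace = c.Namespace
	}
	return types.NamespacedName{Namespace: namespace, Name: c.Topology.Class}
}

func main() {
	c := cluster{Namespace: "ns1", Topology: &topology{Class: "dev", ClassNamespace: "ns2"}}
	fmt.Println(c.GetClassKey()) // ns2/dev
	c.Topology.ClassNamespace = ""
	fmt.Println(c.GetClassKey()) // ns1/dev
}
```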