From 4d7d077483957b6b523be5aed1f91bd7a1b8eb23 Mon Sep 17 00:00:00 2001 From: Jian Qiu Date: Mon, 20 Nov 2023 17:06:18 +0800 Subject: [PATCH] =?UTF-8?q?=F0=9F=8C=B1=20Update=20e2e=20for=20upgrade=20c?= =?UTF-8?q?luster=20manager=20(#394)?= MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit * Update e2e for upgrade cluster manager Signed-off-by: Jian Qiu * Fix nil pointer panic in upgrade Signed-off-by: Jian Qiu --------- Signed-off-by: Jian Qiu --- pkg/cmd/init/scenario/init/operator.yaml | 2 +- pkg/cmd/unjoin/exec.go | 2 +- pkg/cmd/upgrade/clustermanager/exec.go | 79 ++++++++------ pkg/cmd/upgrade/clustermanager/options.go | 33 +----- pkg/cmd/upgrade/klusterlet/exec.go | 54 +++++----- pkg/cmd/upgrade/klusterlet/options.go | 3 +- test/e2e/clusteradm/e2e_suite_test.go | 2 +- test/e2e/clusteradm/upgrade_test.go | 122 ++++++++++++++++++++-- test/e2e/util/e2econf.go | 22 ++-- test/e2e/util/helper.go | 46 +------- test/e2e/util/util.go | 13 ++- test/e2e/util/values.go | 11 +- 12 files changed, 236 insertions(+), 153 deletions(-) diff --git a/pkg/cmd/init/scenario/init/operator.yaml b/pkg/cmd/init/scenario/init/operator.yaml index 4ecd6f91a..675468380 100644 --- a/pkg/cmd/init/scenario/init/operator.yaml +++ b/pkg/cmd/init/scenario/init/operator.yaml @@ -41,7 +41,7 @@ spec: - args: - /registration-operator - hub - image: {{ .Hub.Registry }}/registration-operator:{{ .BundleVersion.RegistrationImageVersion }} + image: {{ .Hub.Registry }}/registration-operator:{{ .BundleVersion.OperatorImageVersion }} imagePullPolicy: IfNotPresent livenessProbe: httpGet: diff --git a/pkg/cmd/unjoin/exec.go b/pkg/cmd/unjoin/exec.go index b6dd2d29c..d8a56b6e1 100644 --- a/pkg/cmd/unjoin/exec.go +++ b/pkg/cmd/unjoin/exec.go @@ -187,7 +187,7 @@ func (o *Options) purgeKlusterlet(kubeClient kubernetes.Interface, klusterletCli } b := retry.DefaultBackoff - b.Duration = 1 * time.Second + b.Duration = 5 * time.Second err = WaitResourceToBeDelete(context.Background(), klusterletClient, o.values.KlusterletName, b) if err != nil { return err diff --git a/pkg/cmd/upgrade/clustermanager/exec.go b/pkg/cmd/upgrade/clustermanager/exec.go index 687191874..4805bce4b 100644 --- a/pkg/cmd/upgrade/clustermanager/exec.go +++ b/pkg/cmd/upgrade/clustermanager/exec.go @@ -2,11 +2,16 @@ package clustermanager import ( + "context" "fmt" + + "k8s.io/apimachinery/pkg/api/errors" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + operatorclient "open-cluster-management.io/api/client/operator/clientset/versioned" + clusteradminit "open-cluster-management.io/clusteradm/pkg/cmd/init" "open-cluster-management.io/clusteradm/pkg/helpers/reader" "github.com/spf13/cobra" - apiextensionsclient "k8s.io/apiextensions-apiserver/pkg/client/clientset/clientset" "k8s.io/klog/v2" init_scenario "open-cluster-management.io/clusteradm/pkg/cmd/init/scenario" "open-cluster-management.io/clusteradm/pkg/helpers" @@ -16,54 +21,66 @@ import ( func (o *Options) complete(cmd *cobra.Command, args []string) (err error) { klog.V(1).InfoS("init options:", "dry-run", o.ClusteradmFlags.DryRun) - o.values = Values{ - Hub: Hub{ - Registry: o.registry, - }, - } - - versionBundle, err := version.GetVersionBundle(o.bundleVersion) - - if err != nil { - klog.Errorf("unable to retrieve version ", err) - return err - } - - o.values.BundleVersion = BundleVersion{ - RegistrationImageVersion: versionBundle.Registration, - PlacementImageVersion: versionBundle.Placement, - WorkImageVersion: versionBundle.Work, - OperatorImageVersion: 
versionBundle.Operator, - } f := o.ClusteradmFlags.KubectlFactory o.builder = f.NewBuilder() - return nil -} - -func (o *Options) validate() (err error) { - err = o.ClusteradmFlags.ValidateHub() + restConfig, err := f.ToRESTConfig() if err != nil { return err } - restConfig, err := o.ClusteradmFlags.KubectlFactory.ToRESTConfig() + operatorClient, err := operatorclient.NewForConfig(restConfig) if err != nil { return err } - - apiExtensionsClient, err := apiextensionsclient.NewForConfig(restConfig) + cm, err := operatorClient.OperatorV1().ClusterManagers().Get(context.TODO(), "cluster-manager", metav1.GetOptions{}) + if errors.IsNotFound(err) { + return fmt.Errorf("clustermanager is not installed") + } if err != nil { return err } - installed, err := helpers.IsClusterManagerInstalled(apiExtensionsClient) + + versionBundle, err := version.GetVersionBundle(o.bundleVersion) if err != nil { + klog.Errorf("unable to retrieve version ", err) return err } - if !installed { - return fmt.Errorf("clustermanager is not installed") + o.values = clusteradminit.Values{ + Hub: clusteradminit.Hub{ + Registry: o.registry, + }, + BundleVersion: clusteradminit.BundleVersion{ + RegistrationImageVersion: versionBundle.Registration, + PlacementImageVersion: versionBundle.Placement, + WorkImageVersion: versionBundle.Work, + OperatorImageVersion: versionBundle.Operator, + }, + } + + // reconstruct values from the cluster manager CR. + if cm.Spec.RegistrationConfiguration != nil { + o.values.RegistrationFeatures = cm.Spec.RegistrationConfiguration.FeatureGates + if len(cm.Spec.RegistrationConfiguration.AutoApproveUsers) > 0 { + o.values.AutoApprove = true + } + } + if cm.Spec.WorkConfiguration != nil { + o.values.WorkFeatures = cm.Spec.WorkConfiguration.FeatureGates + } + if cm.Spec.AddOnManagerConfiguration != nil { + o.values.AddonFeatures = cm.Spec.AddOnManagerConfiguration.FeatureGates + } + + return nil +} + +func (o *Options) validate() (err error) { + err = o.ClusteradmFlags.ValidateHub() + if err != nil { + return err } //TODO check desired version is greater then current version diff --git a/pkg/cmd/upgrade/clustermanager/options.go b/pkg/cmd/upgrade/clustermanager/options.go index 7258fb332..aebf45c91 100644 --- a/pkg/cmd/upgrade/clustermanager/options.go +++ b/pkg/cmd/upgrade/clustermanager/options.go @@ -4,6 +4,7 @@ package clustermanager import ( "k8s.io/cli-runtime/pkg/genericclioptions" "k8s.io/cli-runtime/pkg/resource" + clusteradminit "open-cluster-management.io/clusteradm/pkg/cmd/init" genericclioptionsclusteradm "open-cluster-management.io/clusteradm/pkg/genericclioptions" ) @@ -12,7 +13,7 @@ type Options struct { //ClusteradmFlags: The generic options from the clusteradm cli-runtime. ClusteradmFlags *genericclioptionsclusteradm.ClusteradmFlags - values Values + values clusteradminit.Values //The file to output the resources will be sent to the file. 
registry string //version of predefined compatible image versions @@ -25,36 +26,6 @@ type Options struct { builder *resource.Builder } -type BundleVersion struct { - // registration image version - RegistrationImageVersion string - // placement image version - PlacementImageVersion string - // work image version - WorkImageVersion string - // operator image version - OperatorImageVersion string -} - -// Values: The values used in the template -type Values struct { - //The values related to the hub - Registry string `json:"registry"` - //bundle version - BundleVersion BundleVersion - //Hub: Hub information - Hub Hub -} - -type Hub struct { - //APIServer: The API Server external URL - APIServer string - //KubeConfig: The kubeconfig of the bootstrap secret to connect to the hub - KubeConfig string - //image registry - Registry string -} - func newOptions(clusteradmFlags *genericclioptionsclusteradm.ClusteradmFlags, streams genericclioptions.IOStreams) *Options { return &Options{ ClusteradmFlags: clusteradmFlags, diff --git a/pkg/cmd/upgrade/klusterlet/exec.go b/pkg/cmd/upgrade/klusterlet/exec.go index 379fb1140..0e56abb8a 100644 --- a/pkg/cmd/upgrade/klusterlet/exec.go +++ b/pkg/cmd/upgrade/klusterlet/exec.go @@ -10,6 +10,7 @@ import ( metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/klog/v2" operatorclient "open-cluster-management.io/api/client/operator/clientset/versioned" + "open-cluster-management.io/clusteradm/pkg/cmd/join" join_scenario "open-cluster-management.io/clusteradm/pkg/cmd/join/scenario" "open-cluster-management.io/clusteradm/pkg/helpers" "open-cluster-management.io/clusteradm/pkg/helpers/reader" @@ -19,11 +20,7 @@ import ( //nolint:deadcode,varcheck const ( - klusterletName = "klusterlet" - registrationOperatorNamespace = "open-cluster-management" - klusterletCRD = "klusterlets.operator.open-cluster-management.io" - componentNameRegistrationAgent = "klusterlet-registration-agent" - componentNameWorkAgent = "klusterlet-work-agent" + klusterletName = "klusterlet" ) func (o *Options) complete(cmd *cobra.Command, args []string) (err error) { @@ -32,7 +29,10 @@ func (o *Options) complete(cmd *cobra.Command, args []string) (err error) { return err } - cfg, err := o.ClusteradmFlags.KubectlFactory.ToRESTConfig() + f := o.ClusteradmFlags.KubectlFactory + o.builder = f.NewBuilder() + + cfg, err := f.ToRESTConfig() if err != nil { return err } @@ -47,30 +47,36 @@ func (o *Options) complete(cmd *cobra.Command, args []string) (err error) { return err } - klog.V(1).InfoS("init options:", "dry-run", o.ClusteradmFlags.DryRun) - o.values = Values{ - ClusterName: k.Spec.ClusterName, - Hub: Hub{ - Registry: o.registry, - }, - } - versionBundle, err := version.GetVersionBundle(o.bundleVersion) - if err != nil { klog.Errorf("unable to retrieve version ", err) return err } - o.values.BundleVersion = BundleVersion{ - RegistrationImageVersion: versionBundle.Registration, - PlacementImageVersion: versionBundle.Placement, - WorkImageVersion: versionBundle.Work, - OperatorImageVersion: versionBundle.Operator, + klog.V(1).InfoS("init options:", "dry-run", o.ClusteradmFlags.DryRun) + o.values = join.Values{ + Registry: o.registry, + ClusterName: k.Spec.ClusterName, + Klusterlet: join.Klusterlet{ + Name: k.Name, + Mode: string(k.Spec.DeployOption.Mode), + KlusterletNamespace: k.Spec.Namespace, + }, + BundleVersion: join.BundleVersion{ + RegistrationImageVersion: versionBundle.Registration, + PlacementImageVersion: versionBundle.Placement, + WorkImageVersion: versionBundle.Work, + 
OperatorImageVersion: versionBundle.Operator, + }, } - f := o.ClusteradmFlags.KubectlFactory - o.builder = f.NewBuilder() + // reconstruct values from the klusterlet CR. + if k.Spec.RegistrationConfiguration != nil { + o.values.RegistrationFeatures = k.Spec.RegistrationConfiguration.FeatureGates + } + if k.Spec.WorkConfiguration != nil { + o.values.WorkFeatures = k.Spec.WorkConfiguration.FeatureGates + } return nil } @@ -108,8 +114,6 @@ func (o *Options) run() error { } files := []string{ - "join/namespace_agent.yaml", - "join/namespace_addon.yaml", "join/namespace.yaml", "join/cluster_role.yaml", "join/cluster_role_binding.yaml", @@ -128,7 +132,7 @@ func (o *Options) run() error { } if !o.ClusteradmFlags.DryRun { - if err := wait.WaitUntilCRDReady(apiExtensionsClient, "clustermanagers.operator.open-cluster-management.io", o.wait); err != nil { + if err := wait.WaitUntilCRDReady(apiExtensionsClient, "klusterlets.operator.open-cluster-management.io", o.wait); err != nil { return err } } diff --git a/pkg/cmd/upgrade/klusterlet/options.go b/pkg/cmd/upgrade/klusterlet/options.go index 86e31e90f..0c3788f73 100644 --- a/pkg/cmd/upgrade/klusterlet/options.go +++ b/pkg/cmd/upgrade/klusterlet/options.go @@ -4,6 +4,7 @@ package klusterlet import ( "k8s.io/cli-runtime/pkg/genericclioptions" "k8s.io/cli-runtime/pkg/resource" + "open-cluster-management.io/clusteradm/pkg/cmd/join" genericclioptionsclusteradm "open-cluster-management.io/clusteradm/pkg/genericclioptions" ) @@ -12,7 +13,7 @@ type Options struct { //ClusteradmFlags: The generic options from the clusteradm cli-runtime. ClusteradmFlags *genericclioptionsclusteradm.ClusteradmFlags - values Values + values join.Values //The file to output the resources will be sent to the file. registry string //version of predefined compatible image versions diff --git a/test/e2e/clusteradm/e2e_suite_test.go b/test/e2e/clusteradm/e2e_suite_test.go index 317ededd2..6204b3f18 100644 --- a/test/e2e/clusteradm/e2e_suite_test.go +++ b/test/e2e/clusteradm/e2e_suite_test.go @@ -77,7 +77,7 @@ var _ = ginkgo.BeforeSuite(func() { operatorClient, err = operatorclient.NewForConfig(hubConfig) gomega.Expect(err).NotTo(gomega.HaveOccurred()) - err = clusterv1.AddToScheme(scheme.Scheme) + err = clusterv1.Install(scheme.Scheme) gomega.Expect(err).NotTo(gomega.HaveOccurred()) restConfig = hubConfig diff --git a/test/e2e/clusteradm/upgrade_test.go b/test/e2e/clusteradm/upgrade_test.go index 53352f7d5..972366c13 100644 --- a/test/e2e/clusteradm/upgrade_test.go +++ b/test/e2e/clusteradm/upgrade_test.go @@ -2,8 +2,15 @@ package clusteradme2e import ( + "context" + "fmt" + "time" + "github.com/onsi/ginkgo/v2" "github.com/onsi/gomega" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/client-go/kubernetes" + "open-cluster-management.io/clusteradm/pkg/helpers/version" ) var _ = ginkgo.Describe("test clusteradm upgrade clustermanager and Klusterlets", ginkgo.Ordered, func() { @@ -12,28 +19,131 @@ var _ = ginkgo.Describe("test clusteradm upgrade clustermanager and Klusterlets" ginkgo.By("reset e2e environment...") err := e2e.ClearEnv() gomega.Expect(err).NotTo(gomega.HaveOccurred()) - err = e2e.ResetEnv() - gomega.Expect(err).NotTo(gomega.HaveOccurred()) }) var err error - ginkgo.It("run cluster manager upgrade version latest ", func() { - ginkgo.Skip("Upgrade is skipped due to flaky when destroying control plane. 
Need to revisit it after fix cleanup issue") + ginkgo.It("run cluster manager and klusterlet upgrade version latest ", func() { + ginkgo.By("init hub with service account") + err = e2e.Clusteradm().Init( + "--context", e2e.Cluster().Hub().Context(), + ) + gomega.Expect(err).NotTo(gomega.HaveOccurred(), "clusteradm init error") + + ginkgo.By("Check the version of operator and controller") + gomega.Eventually(func() error { + operator, err := kubeClient.AppsV1().Deployments("open-cluster-management").Get(context.TODO(), "cluster-manager", metav1.GetOptions{}) + if err != nil { + return err + } + if operator.Spec.Template.Spec.Containers[0].Image != "quay.io/open-cluster-management/registration-operator:v"+version.GetDefaultBundleVersion() { + return fmt.Errorf("version of the operator is not correct, get %s", operator.Spec.Template.Spec.Containers[0].Image) + } + registration, err := kubeClient.AppsV1().Deployments("open-cluster-management-hub").Get( + context.TODO(), "cluster-manager-registration-controller", metav1.GetOptions{}) + if err != nil { + return err + } + if registration.Spec.Template.Spec.Containers[0].Image != "quay.io/open-cluster-management/registration:v"+version.GetDefaultBundleVersion() { + return fmt.Errorf("version of the registration controller is not correct, get %s", operator.Spec.Template.Spec.Containers[0].Image) + } + return nil + }, 120*time.Second, 5*time.Second).Should(gomega.Succeed()) + + ginkgo.By("managedcluster1 join hub") + err = e2e.Clusteradm().Join( + "--context", e2e.Cluster().ManagedCluster1().Context(), + "--hub-token", e2e.CommandResult().Token(), "--hub-apiserver", e2e.CommandResult().Host(), + "--cluster-name", e2e.Cluster().ManagedCluster1().Name(), + "--wait", + "--force-internal-endpoint-lookup", + ) + gomega.Expect(err).NotTo(gomega.HaveOccurred(), "managedcluster1 join error") + + ginkgo.By("hub accept managedcluster1") + err = e2e.Clusteradm().Accept( + "--clusters", e2e.Cluster().ManagedCluster1().Name(), + "--wait", + "--context", e2e.Cluster().Hub().Context(), + ) + gomega.Expect(err).NotTo(gomega.HaveOccurred(), "clusteradm accept error") + + mcl1KubeClient, err := kubernetes.NewForConfig(e2e.Cluster().ManagedCluster1().KubeConfig()) + gomega.Expect(err).NotTo(gomega.HaveOccurred()) + + ginkgo.By("Check the version of operator and agent") + gomega.Eventually(func() error { + operator, err := mcl1KubeClient.AppsV1().Deployments("open-cluster-management").Get(context.TODO(), "klusterlet", metav1.GetOptions{}) + if err != nil { + return err + } + if operator.Spec.Template.Spec.Containers[0].Image != "quay.io/open-cluster-management/registration-operator:v"+version.GetDefaultBundleVersion() { + return fmt.Errorf("version of the operator is not correct, get %s", operator.Spec.Template.Spec.Containers[0].Image) + } + registration, err := mcl1KubeClient.AppsV1().Deployments("open-cluster-management-agent").Get( + context.TODO(), "klusterlet-registration-agent", metav1.GetOptions{}) + if err != nil { + return err + } + if registration.Spec.Template.Spec.Containers[0].Image != "quay.io/open-cluster-management/registration:v"+version.GetDefaultBundleVersion() { + return fmt.Errorf("version of the registration agent is not correct, get %s", operator.Spec.Template.Spec.Containers[0].Image) + } + return nil + }, 120*time.Second, 5*time.Second).Should(gomega.Succeed()) + err = e2e.Clusteradm().Upgrade( "clustermanager", - "--bundle-version", "latest", + "--bundle-version=latest", "--context", e2e.Cluster().Hub().Context(), ) 
gomega.Expect(err).NotTo(gomega.HaveOccurred(), "clusteradm upgrade error") + ginkgo.By("Upgrade to the latest version") + gomega.Eventually(func() error { + operator, err := kubeClient.AppsV1().Deployments("open-cluster-management").Get(context.TODO(), "cluster-manager", metav1.GetOptions{}) + if err != nil { + return err + } + if operator.Spec.Template.Spec.Containers[0].Image != "quay.io/open-cluster-management/registration-operator:latest" { + return fmt.Errorf("version of the operator is not correct, get %s", operator.Spec.Template.Spec.Containers[0].Image) + } + registration, err := kubeClient.AppsV1().Deployments("open-cluster-management-hub").Get( + context.TODO(), "cluster-manager-registration-controller", metav1.GetOptions{}) + if err != nil { + return err + } + if registration.Spec.Template.Spec.Containers[0].Image != "quay.io/open-cluster-management/registration:latest" { + return fmt.Errorf("version of the controller is not correct, get %s", operator.Spec.Template.Spec.Containers[0].Image) + } + return nil + }, 120*time.Second, 5*time.Second).Should(gomega.Succeed()) + err = e2e.Clusteradm().Upgrade( "klusterlet", - "--bundle-version", "latest", + "--bundle-version=latest", "--context", e2e.Cluster().ManagedCluster1().Context(), ) gomega.Expect(err).NotTo(gomega.HaveOccurred(), "klusterlet upgrade error") + + gomega.Eventually(func() error { + operator, err := mcl1KubeClient.AppsV1().Deployments("open-cluster-management").Get(context.TODO(), "klusterlet", metav1.GetOptions{}) + if err != nil { + return err + } + if operator.Spec.Template.Spec.Containers[0].Image != "quay.io/open-cluster-management/registration-operator:latest" { + return fmt.Errorf("version of the operator is not correct, get %s", operator.Spec.Template.Spec.Containers[0].Image) + } + registration, err := mcl1KubeClient.AppsV1().Deployments("open-cluster-management-agent").Get( + context.TODO(), "klusterlet-registration-agent", metav1.GetOptions{}) + if err != nil { + return err + } + if registration.Spec.Template.Spec.Containers[0].Image != "quay.io/open-cluster-management/registration:latest" { + return fmt.Errorf("version of the agent is not correct, get %s", operator.Spec.Template.Spec.Containers[0].Image) + } + return nil + }, 120*time.Second, 5*time.Second).Should(gomega.Succeed()) }) }) diff --git a/test/e2e/util/e2econf.go b/test/e2e/util/e2econf.go index e0e31bc8c..1937528c5 100644 --- a/test/e2e/util/e2econf.go +++ b/test/e2e/util/e2econf.go @@ -28,16 +28,26 @@ func NewTestE2eConfig( hubctx string, mcl1 string, mcl1ctx string, -) *TestE2eConfig { +) (*TestE2eConfig, error) { + hubConfig, err := buildConfigFromFlags(hubctx, kubeconfigpath) + if err != nil { + return nil, err + } + mcl1Config, err := buildConfigFromFlags(mcl1ctx, kubeconfigpath) + if err != nil { + return nil, err + } ctx := clusterValues{ hub: &clusterConfig{ - name: hub, - context: hubctx, + name: hub, + context: hubctx, + kubeConfig: hubConfig, }, mcl1: &clusterConfig{ - name: mcl1, - context: mcl1ctx, + name: mcl1, + context: mcl1ctx, + kubeConfig: mcl1Config, }, } @@ -49,5 +59,5 @@ func NewTestE2eConfig( values: &cfgval, clusteradm: &clusteradm{}, Kubeconfigpath: kubeconfigpath, - } + }, nil } diff --git a/test/e2e/util/helper.go b/test/e2e/util/helper.go index af34f7441..14b7fe7c6 100644 --- a/test/e2e/util/helper.go +++ b/test/e2e/util/helper.go @@ -3,10 +3,8 @@ package util import ( "context" - "fmt" "time" - apiextensionsclient "k8s.io/apiextensions-apiserver/pkg/client/clientset/clientset" "k8s.io/apimachinery/pkg/api/errors" 
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/apimachinery/pkg/util/wait" @@ -18,12 +16,7 @@ import ( // WaitNamespaceDeleted receive a kubeconfigpath, a context name and a namespace name, // then poll until the specific namespace is fully deleted or an error occurs. -func WaitNamespaceDeleted(kubeconfigpath string, ctx string, namespace string) error { - restcfg, err := buildConfigFromFlags(ctx, kubeconfigpath) - if err != nil { - return fmt.Errorf("error occurred while build rest config: %s", err) - } - +func WaitNamespaceDeleted(restcfg *rest.Config, namespace string) error { clientset, err := kubernetes.NewForConfig(restcfg) if err != nil { return err @@ -41,12 +34,7 @@ func WaitNamespaceDeleted(kubeconfigpath string, ctx string, namespace string) e }) } -func DeleteClusterCSRs(kubeconfigpath string, ctx string) error { - restcfg, err := buildConfigFromFlags(ctx, kubeconfigpath) - if err != nil { - return fmt.Errorf("error occurred while build rest config: %s", err) - } - +func DeleteClusterCSRs(restcfg *rest.Config) error { clientset, err := kubernetes.NewForConfig(restcfg) if err != nil { return err @@ -57,12 +45,7 @@ func DeleteClusterCSRs(kubeconfigpath string, ctx string) error { }) } -func DeleteClusterFinalizers(kubeconfigpath string, ctx string) error { - restcfg, err := buildConfigFromFlags(ctx, kubeconfigpath) - if err != nil { - return fmt.Errorf("error occurred while build rest config: %s", err) - } - +func DeleteClusterFinalizers(restcfg *rest.Config) error { clientset, err := clusterclient.NewForConfig(restcfg) if err != nil { return err @@ -82,29 +65,6 @@ func DeleteClusterFinalizers(kubeconfigpath string, ctx string) error { return nil } -func WaitCRDDeleted(kubeconfigpath string, ctx string, name string) error { - restcfg, err := buildConfigFromFlags(ctx, kubeconfigpath) - if err != nil { - return fmt.Errorf("error occurred while build rest config: %s", err) - } - - client, err := apiextensionsclient.NewForConfig(restcfg) - if err != nil { - return err - } - - return wait.PollUntilContextCancel(context.TODO(), 1*time.Second, true, func(ctx context.Context) (bool, error) { - _, err = client.ApiextensionsV1().CustomResourceDefinitions().Get(ctx, name, metav1.GetOptions{}) - if errors.IsNotFound(err) { - return true, nil - } - if err != nil { - return false, err - } - return false, nil - }) -} - // buildConfigFromFlags build rest config for specified context in the kubeconfigfile. 
func buildConfigFromFlags(context, kubeconfigPath string) (*rest.Config, error) { return clientcmd.NewNonInteractiveDeferredLoadingClientConfig( diff --git a/test/e2e/util/util.go b/test/e2e/util/util.go index b5dff1423..63ffed542 100644 --- a/test/e2e/util/util.go +++ b/test/e2e/util/util.go @@ -53,11 +53,14 @@ func initE2E() (*TestE2eConfig, error) { os.Setenv("KUBECONFIG", filepath.Join(home, ".kube", "config")) } - e2eConf := NewTestE2eConfig( + e2eConf, err := NewTestE2eConfig( os.Getenv("KUBECONFIG"), os.Getenv("HUB_NAME"), os.Getenv("HUB_CTX"), os.Getenv("MANAGED_CLUSTER1_NAME"), os.Getenv("MANAGED_CLUSTER1_CTX"), ) + if err != nil { + return nil, err + } // clearenv set the e2e environment from initial state to empty clearenv := func() error { @@ -71,13 +74,13 @@ func initE2E() (*TestE2eConfig, error) { if err != nil { return err } - err = WaitNamespaceDeleted(e2eConf.Kubeconfigpath, e2eConf.Cluster().ManagedCluster1().Context(), config.ManagedClusterNamespace) + err = WaitNamespaceDeleted(e2eConf.Cluster().ManagedCluster1().KubeConfig(), config.ManagedClusterNamespace) if err != nil { return err } // delete cluster finalizers - err = DeleteClusterFinalizers(e2eConf.Kubeconfigpath, e2eConf.Cluster().Hub().Context()) + err = DeleteClusterFinalizers(e2eConf.Cluster().Hub().KubeConfig()) if err != nil { return err } @@ -89,11 +92,11 @@ func initE2E() (*TestE2eConfig, error) { return err } - err = DeleteClusterCSRs(e2eConf.Kubeconfigpath, e2eConf.Cluster().Hub().Context()) + err = DeleteClusterCSRs(e2eConf.Cluster().Hub().KubeConfig()) if err != nil { return err } - err = WaitNamespaceDeleted(e2eConf.Kubeconfigpath, e2eConf.Cluster().Hub().Context(), config.HubClusterNamespace) + err = WaitNamespaceDeleted(e2eConf.Cluster().Hub().KubeConfig(), config.HubClusterNamespace) if err != nil { return err } diff --git a/test/e2e/util/values.go b/test/e2e/util/values.go index 0d84fdfa7..bfc55aa99 100644 --- a/test/e2e/util/values.go +++ b/test/e2e/util/values.go @@ -1,9 +1,12 @@ // Copyright Contributors to the Open Cluster Management project package util +import "k8s.io/client-go/rest" + type clusterConfig struct { - name string - context string + name string + context string + kubeConfig *rest.Config } func (cc *clusterConfig) Name() string { @@ -14,6 +17,10 @@ func (cc *clusterConfig) Context() string { return cc.context } +func (cc *clusterConfig) KubeConfig() *rest.Config { + return cc.kubeConfig +} + type clusterValues struct { hub *clusterConfig mcl1 *clusterConfig
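
Note on the "Fix nil pointer panic in upgrade" commit: the clustermanager upgrade path above rebuilds the init values from the existing cluster-manager CR and reads Spec.RegistrationConfiguration, Spec.WorkConfiguration, and Spec.AddOnManagerConfiguration only when they are non-nil, since all three blocks are optional pointers. The standalone Go sketch below restates that guard pattern outside the patch. It reuses the operator clientset and types the patch already imports; the package and helper names (upgradesketch, featureGatesFromClusterManager) are illustrative only and are not part of the change.

package upgradesketch

import (
	"context"
	"fmt"

	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/client-go/rest"
	operatorclient "open-cluster-management.io/api/client/operator/clientset/versioned"
	operatorv1 "open-cluster-management.io/api/operator/v1"
)

// featureGatesFromClusterManager (hypothetical helper) fetches the existing
// cluster-manager CR and returns the feature-gate lists from its optional
// configuration blocks, dereferencing each block only when it is set.
// Touching these nested pointers unconditionally is the kind of access the
// nil checks in the upgrade exec.go above are guarding against.
func featureGatesFromClusterManager(cfg *rest.Config) (registration, work, addon []operatorv1.FeatureGate, err error) {
	client, err := operatorclient.NewForConfig(cfg)
	if err != nil {
		return nil, nil, nil, err
	}

	cm, err := client.OperatorV1().ClusterManagers().Get(context.TODO(), "cluster-manager", metav1.GetOptions{})
	if err != nil {
		return nil, nil, nil, fmt.Errorf("failed to get clustermanager: %w", err)
	}

	// Each configuration block may be nil when the corresponding options
	// were never set at init time, so guard every dereference.
	if cm.Spec.RegistrationConfiguration != nil {
		registration = cm.Spec.RegistrationConfiguration.FeatureGates
	}
	if cm.Spec.WorkConfiguration != nil {
		work = cm.Spec.WorkConfiguration.FeatureGates
	}
	if cm.Spec.AddOnManagerConfiguration != nil {
		addon = cm.Spec.AddOnManagerConfiguration.FeatureGates
	}
	return registration, work, addon, nil
}

A caller would pass the same rest.Config the upgrade command derives from its kubectl factory via f.ToRESTConfig(), as done in complete() above.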