From ac7dfdd8e9b1a89d44bc3e080fbbee47c64b05a2 Mon Sep 17 00:00:00 2001
From: Adrian Suarez <3846392+adriansuarez@users.noreply.github.com>
Date: Thu, 14 Nov 2024 09:26:34 -0500
Subject: [PATCH] Update dependencies and other maintenance (#382)

- Bump versions exercised in Helm upgrade test.
- Update dependencies and consolidate overlapping ones. For example, two
  YAML parsers with similar APIs were being imported.
- Remove usage of deprecated ioutil functions.
- Reorganize imports according to Go conventions, and use consistent
  import aliases for packages.
- Remove code related to the obsolete Tiller operator.
---
 go.mod                                         |   4 +-
 go.sum                                         |   4 +-
 incubator/cloudbeaver/Chart.yaml               |   2 -
 incubator/demo-ycsb/Chart.yaml                 |   1 -
 scripts/ci/install_deps.sh                     |   5 -
 stable/admin/Chart.yaml                        |   1 -
 stable/database/Chart.yaml                     |   1 -
 stable/restore/Chart.yaml                      |   1 -
 stable/storage-class/Chart.yaml                |   1 -
 stable/transparent-hugepage/Chart.yaml         |   1 -
 test/integration/database_marshall_test.go     |   8 +-
 test/integration/kube_config_marshall_test.go  |  43 +++++---
 test/integration/process_marshall_test.go      |   4 +-
 .../registry_entry_marshall_test.go            |  11 +-
 test/integration/shell_scripts_lint_test.go    |   3 +-
 test/integration/template_admin_test.go        |  70 ++++++------
 test/integration/template_backup_test.go       |   9 +-
 test/integration/template_collector_test.go    |  26 ++---
 .../template_database_restore_test.go          |   2 +-
 test/integration/template_database_test.go     | 103 +++++++++---------
 test/integration/template_resources_test.go    |   3 +-
 test/integration/template_restore_test.go      |   2 +-
 .../template_storage_class_test.go             |   8 +-
 test/integration/template_ycsb_test.go         |  13 +--
 test/minikube/minikube_base_admin_test.go      |   5 -
 test/minikube/minikube_base_collector_test.go  |  10 +-
 test/minikube/minikube_base_database_test.go   |  15 +--
 test/minikube/minikube_base_restore_test.go    |  20 +---
 test/minikube/minikube_base_tde_test.go        |  16 +--
 test/minikube/minikube_base_thp_test.go        |   9 +-
 test/minikube/minikube_crash_handling_test.go  |  26 ++---
 test/minikube/minikube_domain_resync_test.go   |  17 +--
 test/minikube/minikube_e2e_demo_test.go        |   8 +-
 .../minikube/minikube_external_access_test.go  |  12 +-
 test/minikube/minikube_fsfreeze_test.go        |  10 +-
 test/minikube/minikube_kaa_additions_test.go   |  34 +++---
 test/minikube/minikube_large_instance_test.go  |   6 +-
 test/minikube/minikube_long_admin_test.go      |  11 +-
 .../minikube_long_journal_migration_test.go    |   5 +-
 test/minikube/minikube_long_restore_test.go    |  17 +--
 test/minikube/minikube_multitenant_test.go     |  15 +--
 test/minikube/minikube_priority_class_test.go  |  11 +-
 .../minikube/minikube_rolling_upgrade_test.go  |  25 ++---
 .../minikube_security_context_test.go          |  17 ++-
 test/minikube/minikube_test_test.go            |   4 +-
 test/minikube/minikube_tls_admin_test.go       |  14 +--
 test/minikube/minikube_tls_rotation_test.go    |  15 +--
 test/minikube/minikube_upgrade_helm_test.go    |  61 ++++------
 test/minikube/verify_utility.go                |   3 +-
 .../minikube_base_multicluster_test.go         |  11 +-
 test/testlib/NuoDBKubeConfig.go                |  12 +-
 test/testlib/NuoDBRegistryEntry.go             |  12 +-
 test/testlib/core_recovery.go                  |   6 +-
 test/testlib/haproxy_utilities.go              |   4 +-
 test/testlib/hashicorp_utilities.go            |  13 ++-
 test/testlib/minikube_utilities.go             |  66 ++++-------
 test/testlib/multicluster_utilities.go         |  10 +-
 test/testlib/nuodb_admin_utilities.go          |  10 +-
 test/testlib/nuodb_database_utilities.go       |  18 ++-
 test/testlib/secrets.go                        |   6 +-
 test/testlib/template_utilities.go             |  57 +++++-----
 test/testlib/tls.go                            |   8 +-
 62 files changed, 396 insertions(+), 549 deletions(-)

diff --git a/go.mod b/go.mod
index f52a70355..442a09b89 100644
--- a/go.mod
+++ b/go.mod
@@ -5,14 +5,13 @@ go 1.21
 toolchain go1.21.6

 require (
-    github.com/Masterminds/semver v1.5.0
+    github.com/Masterminds/semver/v3 v3.3.0
     github.com/ghodss/yaml v1.0.0
     github.com/google/go-cmp v0.6.0
     github.com/gruntwork-io/terratest v0.46.8
     github.com/kballard/go-shellquote v0.0.0-20180428030007-95032a82bc51
     github.com/otiai10/copy v1.14.0
     github.com/stretchr/testify v1.8.4
-    gopkg.in/yaml.v3 v3.0.1
     k8s.io/api v0.29.0
     k8s.io/apimachinery v0.29.0
 )
@@ -68,6 +67,7 @@ require (
     google.golang.org/protobuf v1.33.0 // indirect
     gopkg.in/inf.v0 v0.9.1 // indirect
     gopkg.in/yaml.v2 v2.4.0 // indirect
+    gopkg.in/yaml.v3 v3.0.1 // indirect
     k8s.io/client-go v0.29.0 // indirect
     k8s.io/klog/v2 v2.110.1 // indirect
     k8s.io/kube-openapi v0.0.0-20231214164306-ab13479f8bf8 // indirect
diff --git a/go.sum b/go.sum
index c933b0c39..89e8867a1 100644
--- a/go.sum
+++ b/go.sum
@@ -1,5 +1,5 @@
-github.com/Masterminds/semver v1.5.0 h1:H65muMkzWKEuNDnfl9d70GUjFniHKHRbFPGBuZ3QEww=
-github.com/Masterminds/semver v1.5.0/go.mod h1:MB6lktGJrhw8PrUyiEoblNEGEQ+RzHPF078ddwwvV3Y=
+github.com/Masterminds/semver/v3 v3.3.0 h1:B8LGeaivUe71a5qox1ICM/JLl0NqZSW5CHyL+hmvYS0=
+github.com/Masterminds/semver/v3 v3.3.0/go.mod h1:4V+yj/TJE1HU9XfppCwVMZq3I84lprf4nC11bSS5beM=
 github.com/armon/go-socks5 v0.0.0-20160902184237-e75332964ef5 h1:0CwZNZbxp69SHPdPJAN/hZIm0C4OItdklCFmMRWYpio=
 github.com/armon/go-socks5 v0.0.0-20160902184237-e75332964ef5/go.mod h1:wHh0iHkYZB8zMSxRWpUBQtwG5a7fFgvEO+odwuTv2gs=
 github.com/aws/aws-sdk-go v1.49.12 h1:SbGHDdMjtuTL8zpRXKjvIvQHLt9cCqcxcHoJps23WxI=
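The go.mod/go.sum change above moves github.com/Masterminds/semver from v1 to its /v3 major-version module path. A minimal sketch of the v3 API, assuming illustrative version strings (not values from this repository); the package name remains "semver":

    package main

    import (
        "fmt"

        "github.com/Masterminds/semver/v3"
    )

    func main() {
        // Parse a concrete version and a constraint, then check one against the other.
        v, err := semver.NewVersion("4.3.1")
        if err != nil {
            panic(err)
        }
        c, err := semver.NewConstraint(">= 4.2.0")
        if err != nil {
            panic(err)
        }
        fmt.Println(c.Check(v)) // true
    }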
"6.0" -tillerVersion: ">=2.9" diff --git a/stable/restore/Chart.yaml b/stable/restore/Chart.yaml index 705a8097c..904843ee4 100644 --- a/stable/restore/Chart.yaml +++ b/stable/restore/Chart.yaml @@ -14,4 +14,3 @@ maintainers: engine: gotpl icon: https://raw.githubusercontent.com/nuodb/nuodb-helm-charts/master/images/nuodb.svg appVersion: "6.0" -tillerVersion: ">=2.9" diff --git a/stable/storage-class/Chart.yaml b/stable/storage-class/Chart.yaml index 69f6d696a..743569be8 100644 --- a/stable/storage-class/Chart.yaml +++ b/stable/storage-class/Chart.yaml @@ -13,4 +13,3 @@ maintainers: engine: gotpl icon: https://raw.githubusercontent.com/nuodb/nuodb-helm-charts/master/images/nuodb.svg appVersion: "6.0" -tillerVersion: ">=2.9" diff --git a/stable/transparent-hugepage/Chart.yaml b/stable/transparent-hugepage/Chart.yaml index 1f9a13c94..cf6ee17de 100644 --- a/stable/transparent-hugepage/Chart.yaml +++ b/stable/transparent-hugepage/Chart.yaml @@ -14,4 +14,3 @@ maintainers: engine: gotpl icon: https://raw.githubusercontent.com/nuodb/nuodb-helm-charts/master/images/nuodb.svg appVersion: "6.0" -tillerVersion: ">=2.9" diff --git a/test/integration/database_marshall_test.go b/test/integration/database_marshall_test.go index ed9b2e0f0..ab33982b7 100644 --- a/test/integration/database_marshall_test.go +++ b/test/integration/database_marshall_test.go @@ -1,15 +1,15 @@ package integration import ( - "github.com/nuodb/nuodb-helm-charts/v3/test/testlib" + "testing" + "github.com/stretchr/testify/assert" - "testing" + "github.com/nuodb/nuodb-helm-charts/v3/test/testlib" ) func TestDatabaseUnmarshal(t *testing.T) { - s := ( - `{ + s := (`{ "incarnation": { "major": 1, "minor": 2 diff --git a/test/integration/kube_config_marshall_test.go b/test/integration/kube_config_marshall_test.go index 1576f4e74..f6ac1a554 100644 --- a/test/integration/kube_config_marshall_test.go +++ b/test/integration/kube_config_marshall_test.go @@ -1,16 +1,16 @@ package integration import ( - "github.com/nuodb/nuodb-helm-charts/v3/test/testlib" + "os" + "testing" + "github.com/stretchr/testify/assert" - "io/ioutil" - "testing" + "github.com/nuodb/nuodb-helm-charts/v3/test/testlib" ) - func TestKubeConfigUnmarshall(t *testing.T) { - content, err := ioutil.ReadFile("../files/nuodb-dump.json") + content, err := os.ReadFile("../files/nuodb-dump.json") assert.NoError(t, err) err, objects := testlib.UnmarshalNuoDBKubeConfig(string(content)) @@ -26,27 +26,36 @@ func TestKubeConfigUnmarshall(t *testing.T) { assert.True(t, len(config.Volumes) == 3) // StatefulSets - assert.True(t, func() bool {_, ok := config.StatefulSets["admin-u7mxhy-nuodb-cluster0"]; return ok}()) - assert.True(t, func() bool {_, ok := config.StatefulSets["sm-database-vslldk-nuodb-cluster0-demo"]; return ok}()) - assert.True(t, func() bool {_, ok := config.StatefulSets["sm-database-vslldk-nuodb-cluster0-demo-hotcopy"]; return ok}()) + assert.True(t, func() bool { _, ok := config.StatefulSets["admin-u7mxhy-nuodb-cluster0"]; return ok }()) + assert.True(t, func() bool { _, ok := config.StatefulSets["sm-database-vslldk-nuodb-cluster0-demo"]; return ok }()) + assert.True(t, func() bool { _, ok := config.StatefulSets["sm-database-vslldk-nuodb-cluster0-demo-hotcopy"]; return ok }()) // Deployments - assert.True(t, func() bool {_, ok := config.Deployments["te-database-vslldk-nuodb-cluster0-demo"]; return ok}()) + assert.True(t, func() bool { _, ok := config.Deployments["te-database-vslldk-nuodb-cluster0-demo"]; return ok }()) // Admin Volumes - assert.True(t, func() bool {_, ok 
:= config.Volumes["raftlog-admin-u7mxhy-nuodb-cluster0-0"]; return ok}()) + assert.True(t, func() bool { _, ok := config.Volumes["raftlog-admin-u7mxhy-nuodb-cluster0-0"]; return ok }()) // DB Volumes - assert.True(t, func() bool {_, ok := config.Volumes["archive-volume-sm-database-vslldk-nuodb-cluster0-demo-hotcopy-0"]; return ok}()) - assert.True(t, func() bool {_, ok := config.Volumes["backup-volume-sm-database-vslldk-nuodb-cluster0-demo-hotcopy-0"]; return ok}()) + assert.True(t, func() bool { + _, ok := config.Volumes["archive-volume-sm-database-vslldk-nuodb-cluster0-demo-hotcopy-0"] + return ok + }()) + assert.True(t, func() bool { + _, ok := config.Volumes["backup-volume-sm-database-vslldk-nuodb-cluster0-demo-hotcopy-0"] + return ok + }()) // Admin Pods - assert.True(t, func() bool {_, ok := config.Pods["admin-u7mxhy-nuodb-cluster0-0"]; return ok}()) - assert.True(t, func() bool {_, ok := config.Pods["job-lb-policy-nearest-zs9jl"]; return ok}()) + assert.True(t, func() bool { _, ok := config.Pods["admin-u7mxhy-nuodb-cluster0-0"]; return ok }()) + assert.True(t, func() bool { _, ok := config.Pods["job-lb-policy-nearest-zs9jl"]; return ok }()) // DB Pods - assert.True(t, func() bool {_, ok := config.Pods["sm-database-vslldk-nuodb-cluster0-demo-hotcopy-0"]; return ok}()) - assert.True(t, func() bool {_, ok := config.Pods["hotcopy-demo-job-initial-549rc"]; return ok}()) - assert.True(t, func() bool {_, ok := config.Pods["te-database-vslldk-nuodb-cluster0-demo-65c4cdf487-wbzj9"]; return ok}()) + assert.True(t, func() bool { _, ok := config.Pods["sm-database-vslldk-nuodb-cluster0-demo-hotcopy-0"]; return ok }()) + assert.True(t, func() bool { _, ok := config.Pods["hotcopy-demo-job-initial-549rc"]; return ok }()) + assert.True(t, func() bool { + _, ok := config.Pods["te-database-vslldk-nuodb-cluster0-demo-65c4cdf487-wbzj9"] + return ok + }()) } diff --git a/test/integration/process_marshall_test.go b/test/integration/process_marshall_test.go index 30eb663be..6fbf04690 100644 --- a/test/integration/process_marshall_test.go +++ b/test/integration/process_marshall_test.go @@ -2,11 +2,11 @@ package integration import ( "encoding/json" + "testing" - "github.com/nuodb/nuodb-helm-charts/v3/test/testlib" "github.com/stretchr/testify/assert" - "testing" + "github.com/nuodb/nuodb-helm-charts/v3/test/testlib" ) func TestUnmarshall(t *testing.T) { diff --git a/test/integration/registry_entry_marshall_test.go b/test/integration/registry_entry_marshall_test.go index 2fcb22c82..5219f7c3f 100644 --- a/test/integration/registry_entry_marshall_test.go +++ b/test/integration/registry_entry_marshall_test.go @@ -1,23 +1,22 @@ package integration import ( - "github.com/nuodb/nuodb-helm-charts/v3/test/testlib" + "testing" + "github.com/stretchr/testify/assert" - "testing" + "github.com/nuodb/nuodb-helm-charts/v3/test/testlib" ) func TestRegistryEntryUnmarshal(t *testing.T) { - s := ( -`nuodb: + s := (`nuodb: image: registry: local repository: master tag: latest `) - err, object := testlib.UnmarshalImageYAML(s) - + object, err := testlib.UnmarshalImageYAML(s) assert.NoError(t, err) assert.True(t, object.Nuodb.Image.Registry == "local") diff --git a/test/integration/shell_scripts_lint_test.go b/test/integration/shell_scripts_lint_test.go index f4e15b108..0bae819f2 100644 --- a/test/integration/shell_scripts_lint_test.go +++ b/test/integration/shell_scripts_lint_test.go @@ -8,9 +8,10 @@ import ( "regexp" "testing" - "github.com/nuodb/nuodb-helm-charts/v3/test/testlib" "github.com/stretchr/testify/assert" 
"github.com/stretchr/testify/require" + + "github.com/nuodb/nuodb-helm-charts/v3/test/testlib" ) const shebangPattern = "[#][!][ \t]*.+[/ ](sh|bash)" diff --git a/test/integration/template_admin_test.go b/test/integration/template_admin_test.go index 7e196834a..24054115f 100644 --- a/test/integration/template_admin_test.go +++ b/test/integration/template_admin_test.go @@ -6,17 +6,15 @@ import ( "strings" "testing" + "github.com/gruntwork-io/terratest/modules/helm" + "github.com/gruntwork-io/terratest/modules/k8s" "github.com/stretchr/testify/assert" "github.com/stretchr/testify/require" - appsv1 "k8s.io/api/apps/v1" - v1 "k8s.io/api/core/v1" + corev1 "k8s.io/api/core/v1" "k8s.io/apimachinery/pkg/api/resource" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" - "github.com/gruntwork-io/terratest/modules/helm" - "github.com/gruntwork-io/terratest/modules/k8s" - "github.com/nuodb/nuodb-helm-charts/v3/test/testlib" ) @@ -50,7 +48,7 @@ func verifyAdminResourceLabels(t *testing.T, releaseName string, options *helm.O } } -func findProjectedSecret(name string, sources []v1.VolumeProjection) (*v1.SecretProjection, bool) { +func findProjectedSecret(name string, sources []corev1.VolumeProjection) (*corev1.SecretProjection, bool) { for _, src := range sources { if src.Secret != nil && src.Secret.Name == name { return src.Secret, true @@ -59,7 +57,7 @@ func findProjectedSecret(name string, sources []v1.VolumeProjection) (*v1.Secret return nil, false } -func verifyTLSSecrets(t assert.TestingT, spec v1.PodSpec, options *helm.Options) { +func verifyTLSSecrets(t assert.TestingT, spec corev1.PodSpec, options *helm.Options) { tlsVolume, found := testlib.GetVolume(spec.Volumes, "tls") assert.True(t, found, "Expected to find tls volume") assert.NotNil(t, tlsVolume.Projected) @@ -108,7 +106,7 @@ func TestAdminDefaultLicense(t *testing.T) { if strings.Contains(part, "nuodb-admin-configuration") { found = true - var object v1.ConfigMap + var object corev1.ConfigMap helm.UnmarshalK8SYaml(t, part, &object) assert.Equal(t, len(object.Data), 0) @@ -146,7 +144,7 @@ func TestAdminLicenseCanBeSet(t *testing.T) { if strings.Contains(part, "nuodb-cluster0-admin-configuration") { found = true - var object v1.ConfigMap + var object corev1.ConfigMap helm.UnmarshalK8SYaml(t, part, &object) val, ok := object.Data["nuodb.lic"] @@ -182,7 +180,7 @@ func TestAdminStatefulSetVPNRenders(t *testing.T) { adminContainer := obj.Spec.Template.Spec.Containers[0] assert.True(t, adminContainer.EnvFrom[0].ConfigMapRef.LocalObjectReference.Name == "test-config") - assert.Contains(t, adminContainer.SecurityContext.Capabilities.Add, v1.Capability("NET_ADMIN")) + assert.Contains(t, adminContainer.SecurityContext.Capabilities.Add, corev1.Capability("NET_ADMIN")) assert.Equal(t, "nuoadmin", adminContainer.Args[0]) assert.Equal(t, "--", adminContainer.Args[1]) @@ -248,7 +246,7 @@ func TestAdminClusterServiceRenders(t *testing.T) { for _, obj := range testlib.SplitAndRenderService(t, output, 1) { assert.Equal(t, "nuodb-clusterip", obj.Name) - assert.Equal(t, v1.ServiceTypeClusterIP, obj.Spec.Type) + assert.Equal(t, corev1.ServiceTypeClusterIP, obj.Spec.Type) assert.Empty(t, obj.Spec.ClusterIP) } } @@ -266,7 +264,7 @@ func TestAdminHeadlessServiceRenders(t *testing.T) { for _, obj := range testlib.SplitAndRenderService(t, output, 1) { assert.Equal(t, "nuodb", obj.Name) - assert.Equal(t, v1.ServiceTypeClusterIP, obj.Spec.Type) + assert.Equal(t, corev1.ServiceTypeClusterIP, obj.Spec.Type) assert.Equal(t, "None", obj.Spec.ClusterIP) } } @@ -288,7 +286,7 @@ 
diff --git a/test/integration/template_admin_test.go b/test/integration/template_admin_test.go
index 7e196834a..24054115f 100644
--- a/test/integration/template_admin_test.go
+++ b/test/integration/template_admin_test.go
@@ -6,17 +6,15 @@ import (
     "strings"
     "testing"

+    "github.com/gruntwork-io/terratest/modules/helm"
+    "github.com/gruntwork-io/terratest/modules/k8s"
     "github.com/stretchr/testify/assert"
     "github.com/stretchr/testify/require"
-    appsv1 "k8s.io/api/apps/v1"
-    v1 "k8s.io/api/core/v1"
+    corev1 "k8s.io/api/core/v1"
     "k8s.io/apimachinery/pkg/api/resource"
     metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"

-    "github.com/gruntwork-io/terratest/modules/helm"
-    "github.com/gruntwork-io/terratest/modules/k8s"
-
     "github.com/nuodb/nuodb-helm-charts/v3/test/testlib"
 )
@@ -50,7 +48,7 @@ func verifyAdminResourceLabels(t *testing.T, releaseName string, options *helm.O
     }
 }

-func findProjectedSecret(name string, sources []v1.VolumeProjection) (*v1.SecretProjection, bool) {
+func findProjectedSecret(name string, sources []corev1.VolumeProjection) (*corev1.SecretProjection, bool) {
     for _, src := range sources {
         if src.Secret != nil && src.Secret.Name == name {
             return src.Secret, true
@@ -59,7 +57,7 @@
     return nil, false
 }

-func verifyTLSSecrets(t assert.TestingT, spec v1.PodSpec, options *helm.Options) {
+func verifyTLSSecrets(t assert.TestingT, spec corev1.PodSpec, options *helm.Options) {
     tlsVolume, found := testlib.GetVolume(spec.Volumes, "tls")
     assert.True(t, found, "Expected to find tls volume")
     assert.NotNil(t, tlsVolume.Projected)
@@ -108,7 +106,7 @@
         if strings.Contains(part, "nuodb-admin-configuration") {
             found = true

-            var object v1.ConfigMap
+            var object corev1.ConfigMap
             helm.UnmarshalK8SYaml(t, part, &object)

             assert.Equal(t, len(object.Data), 0)
@@ -146,7 +144,7 @@
         if strings.Contains(part, "nuodb-cluster0-admin-configuration") {
             found = true

-            var object v1.ConfigMap
+            var object corev1.ConfigMap
             helm.UnmarshalK8SYaml(t, part, &object)

             val, ok := object.Data["nuodb.lic"]
@@ -182,7 +180,7 @@
     adminContainer := obj.Spec.Template.Spec.Containers[0]
     assert.True(t, adminContainer.EnvFrom[0].ConfigMapRef.LocalObjectReference.Name == "test-config")

-    assert.Contains(t, adminContainer.SecurityContext.Capabilities.Add, v1.Capability("NET_ADMIN"))
+    assert.Contains(t, adminContainer.SecurityContext.Capabilities.Add, corev1.Capability("NET_ADMIN"))

     assert.Equal(t, "nuoadmin", adminContainer.Args[0])
     assert.Equal(t, "--", adminContainer.Args[1])
@@ -248,7 +246,7 @@ func TestAdminClusterServiceRenders(t *testing.T) {

     for _, obj := range testlib.SplitAndRenderService(t, output, 1) {
         assert.Equal(t, "nuodb-clusterip", obj.Name)
-        assert.Equal(t, v1.ServiceTypeClusterIP, obj.Spec.Type)
+        assert.Equal(t, corev1.ServiceTypeClusterIP, obj.Spec.Type)
         assert.Empty(t, obj.Spec.ClusterIP)
     }
 }
@@ -266,7 +264,7 @@ func TestAdminHeadlessServiceRenders(t *testing.T) {

     for _, obj := range testlib.SplitAndRenderService(t, output, 1) {
         assert.Equal(t, "nuodb", obj.Name)
-        assert.Equal(t, v1.ServiceTypeClusterIP, obj.Spec.Type)
+        assert.Equal(t, corev1.ServiceTypeClusterIP, obj.Spec.Type)
         assert.Equal(t, "None", obj.Spec.ClusterIP)
     }
 }
@@ -288,7 +286,7 @@ func TestAdminServiceRenders(t *testing.T) {

     for _, obj := range testlib.SplitAndRenderService(t, output, 1) {
         assert.Equal(t, "nuodb-balancer", obj.Name)
-        assert.Equal(t, v1.ServiceTypeLoadBalancer, obj.Spec.Type)
+        assert.Equal(t, corev1.ServiceTypeLoadBalancer, obj.Spec.Type)
         assert.Empty(t, obj.Spec.ClusterIP)
         assert.Contains(t, obj.Annotations, "service.beta.kubernetes.io/aws-load-balancer-internal")
         assert.Contains(t, obj.Annotations, "service.beta.kubernetes.io/aws-load-balancer-scheme")
@@ -300,7 +298,7 @@ func TestAdminServiceRenders(t *testing.T) {

     for _, obj := range testlib.SplitAndRenderService(t, output, 1) {
         assert.Equal(t, "nuodb-balancer", obj.Name)
-        assert.Equal(t, v1.ServiceTypeLoadBalancer, obj.Spec.Type)
+        assert.Equal(t, corev1.ServiceTypeLoadBalancer, obj.Spec.Type)
         assert.Empty(t, obj.Spec.ClusterIP)
         assert.Equal(t, obj.Annotations["service.beta.kubernetes.io/aws-load-balancer-scheme"], "internet-facing")
     }
@@ -310,7 +308,7 @@ func TestAdminServiceRenders(t *testing.T) {
     output = helm.RenderTemplate(t, options, helmChartPath, "release-name", []string{"templates/service.yaml"})
     for _, obj := range testlib.SplitAndRenderService(t, output, 1) {
         assert.Equal(t, "nuodb-balancer", obj.Name)
-        assert.Equal(t, v1.ServiceTypeLoadBalancer, obj.Spec.Type)
+        assert.Equal(t, corev1.ServiceTypeLoadBalancer, obj.Spec.Type)
         assert.Equal(t, obj.Annotations["service.beta.kubernetes.io/aws-load-balancer-name"], "nuodb-admin-nlb")
         assert.NotContains(t, obj.Annotations, "service.beta.kubernetes.io/aws-load-balancer-scheme")
     }
@@ -334,7 +332,7 @@ func TestAdminNodePortServiceRenders(t *testing.T) {

     for _, obj := range testlib.SplitAndRenderService(t, output, 1) {
         assert.Equal(t, "nuodb-nodeport", obj.Name)
-        assert.Equal(t, v1.ServiceTypeNodePort, obj.Spec.Type)
+        assert.Equal(t, corev1.ServiceTypeNodePort, obj.Spec.Type)
         assert.Empty(t, obj.Spec.ClusterIP)
         assert.NotContains(t, obj.Annotations, "service.beta.kubernetes.io/aws-load-balancer-internal")
     }
@@ -344,7 +342,7 @@ func TestAdminStatefulSetVolumes(t *testing.T) {
     // Path to the helm chart we will test
     helmChartPath := "../../stable/admin"

-    findEphemeralVolume := func(volumes []v1.Volume) *v1.Volume {
+    findEphemeralVolume := func(volumes []corev1.Volume) *corev1.Volume {
         for _, volume := range volumes {
             if volume.Name == "eph-volume" {
                 return &volume
@@ -354,7 +352,7 @@ func TestAdminStatefulSetVolumes(t *testing.T) {
     }

     // Returns a map of mount point to subpath for all eph-volume mounts
-    findEphemeralVolumeMounts := func(mounts []v1.VolumeMount) map[string]string {
+    findEphemeralVolumeMounts := func(mounts []corev1.VolumeMount) map[string]string {
         ret := make(map[string]string)
         for _, mount := range mounts {
             if mount.Name == "eph-volume" {
@@ -364,7 +362,7 @@ func TestAdminStatefulSetVolumes(t *testing.T) {
         return ret
     }

-    assertStorageEquals := func(t *testing.T, volume *v1.Volume, size string) {
+    assertStorageEquals := func(t *testing.T, volume *corev1.Volume, size string) {
         quantity, err := resource.ParseQuantity(size)
         assert.NoError(t, err)
         assert.Equal(t, volume.Ephemeral.VolumeClaimTemplate.Spec.Resources.Requests.Storage(), &quantity)
@@ -974,7 +972,7 @@ func TestAdminSecurityContext(t *testing.T) {
         assert.True(t, *containerSecurityContext.ReadOnlyRootFilesystem)

         // Check that /tmp directory has ephemeral volume mounted to it
-        var tmpVolumeMount *v1.VolumeMount
+        var tmpVolumeMount *corev1.VolumeMount
         for _, volumeMount := range container.VolumeMounts {
             if volumeMount.MountPath == "/tmp" {
                 tmpVolumeMount = volumeMount.DeepCopy()
@@ -985,7 +983,7 @@ func TestAdminSecurityContext(t *testing.T) {
         assert.Equal(t, "tmp", tmpVolumeMount.SubPath)

         // Check that NUODOCKER_CONF_DIR=/tmp for generated nuoadmin.conf
-        var confDirEnv *v1.EnvVar
+        var confDirEnv *corev1.EnvVar
         for _, env := range container.Env {
             if env.Name == "NUODOCKER_CONF_DIR" {
                 confDirEnv = env.DeepCopy()
@@ -1008,7 +1006,7 @@ func TestAdminSecurityContext(t *testing.T) {
         for _, obj := range testlib.SplitAndRenderStatefulSet(t, output, 1) {
             containerSecurityContext := obj.Spec.Template.Spec.Containers[0].SecurityContext
             assert.NotNil(t, containerSecurityContext)
-            assert.Contains(t, containerSecurityContext.Capabilities.Add, v1.Capability("NET_ADMIN"))
+            assert.Contains(t, containerSecurityContext.Capabilities.Add, corev1.Capability("NET_ADMIN"))
             assert.Nil(t, containerSecurityContext.Capabilities.Drop)
         }
     })
@@ -1025,7 +1023,7 @@ func TestAdminSecurityContext(t *testing.T) {
         for _, obj := range testlib.SplitAndRenderStatefulSet(t, output, 1) {
             containerSecurityContext := obj.Spec.Template.Spec.Containers[0].SecurityContext
             assert.NotNil(t, containerSecurityContext)
-            assert.Contains(t, containerSecurityContext.Capabilities.Drop, v1.Capability("CAP_NET_RAW"))
+            assert.Contains(t, containerSecurityContext.Capabilities.Drop, corev1.Capability("CAP_NET_RAW"))
             assert.Nil(t, containerSecurityContext.Capabilities.Add)
         }
     })
@@ -1175,7 +1173,7 @@ func TestAdminSecurityContext(t *testing.T) {
     })
 }

-func getContainerNamed(containers []v1.Container, name string) (*v1.Container, error) {
+func getContainerNamed(containers []corev1.Container, name string) (*corev1.Container, error) {
     var containerNames string
     for _, container := range containers {
         if container.Name == name {
@@ -1458,18 +1456,18 @@ func TestAdminTLSConfig(t *testing.T) {
     for _, obj := range testlib.SplitAndRenderStatefulSet(t, output, 1) {
         verifyTLSSecrets(t, obj.Spec.Template.Spec, options)
         assert.True(t, testlib.EnvContainsValueFrom(obj.Spec.Template.Spec.Containers[0].Env,
-            "NUODB_KEYSTORE_PASSWORD", &v1.EnvVarSource{
-                SecretKeyRef: &v1.SecretKeySelector{
-                    LocalObjectReference: v1.LocalObjectReference{
+            "NUODB_KEYSTORE_PASSWORD", &corev1.EnvVarSource{
+                SecretKeyRef: &corev1.SecretKeySelector{
+                    LocalObjectReference: corev1.LocalObjectReference{
                         Name: options.SetValues["admin.tlsKeyStore.secret"],
                     },
                     Key: "password",
                 },
             }))
         assert.True(t, testlib.EnvContainsValueFrom(obj.Spec.Template.Spec.Containers[0].Env,
-            "NUODB_TRUSTSTORE_PASSWORD", &v1.EnvVarSource{
-                SecretKeyRef: &v1.SecretKeySelector{
-                    LocalObjectReference: v1.LocalObjectReference{
+            "NUODB_TRUSTSTORE_PASSWORD", &corev1.EnvVarSource{
+                SecretKeyRef: &corev1.SecretKeySelector{
+                    LocalObjectReference: corev1.LocalObjectReference{
                         Name: options.SetValues["admin.tlsTrustStore.secret"],
                     },
                     Key: "password",
@@ -1499,18 +1497,18 @@ func TestAdminTLSConfig(t *testing.T) {
     for _, obj := range testlib.SplitAndRenderStatefulSet(t, output, 1) {
         verifyTLSSecrets(t, obj.Spec.Template.Spec, options)
         assert.True(t, testlib.EnvContainsValueFrom(obj.Spec.Template.Spec.Containers[0].Env,
-            "NUODB_KEYSTORE_PASSWORD", &v1.EnvVarSource{
-                SecretKeyRef: &v1.SecretKeySelector{
-                    LocalObjectReference: v1.LocalObjectReference{
+            "NUODB_KEYSTORE_PASSWORD", &corev1.EnvVarSource{
+                SecretKeyRef: &corev1.SecretKeySelector{
+                    LocalObjectReference: corev1.LocalObjectReference{
                         Name: options.SetValues["admin.tlsKeyStore.secret"],
                     },
                     Key: options.SetValues["admin.tlsKeyStore.passwordKey"],
                 },
             }))
         assert.True(t, testlib.EnvContainsValueFrom(obj.Spec.Template.Spec.Containers[0].Env,
-            "NUODB_TRUSTSTORE_PASSWORD", &v1.EnvVarSource{
-                SecretKeyRef: &v1.SecretKeySelector{
-                    LocalObjectReference: v1.LocalObjectReference{
+            "NUODB_TRUSTSTORE_PASSWORD", &corev1.EnvVarSource{
+                SecretKeyRef: &corev1.SecretKeySelector{
+                    LocalObjectReference: corev1.LocalObjectReference{
                         Name: options.SetValues["admin.tlsTrustStore.secret"],
                     },
                     Key: options.SetValues["admin.tlsTrustStore.passwordKey"],
diff --git a/test/integration/template_backup_test.go b/test/integration/template_backup_test.go
index f579c3693..807fee630 100644
--- a/test/integration/template_backup_test.go
+++ b/test/integration/template_backup_test.go
@@ -6,11 +6,12 @@ import (
     "testing"

     "github.com/gruntwork-io/terratest/modules/helm"
-    "github.com/nuodb/nuodb-helm-charts/v3/test/testlib"
     "github.com/stretchr/testify/assert"
     "github.com/stretchr/testify/require"
-    v1 "k8s.io/api/core/v1"
+    corev1 "k8s.io/api/core/v1"
     metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+
+    "github.com/nuodb/nuodb-helm-charts/v3/test/testlib"
 )

 func verifyBackupResourceLabels(t *testing.T, options *helm.Options, obj metav1.Object) {
@@ -323,7 +324,7 @@ func TestDatabaseBackupCronJobRestartPolicyDefault(t *testing.T) {
     output := helm.RenderTemplate(t, options, helmChartPath, "release-name", []string{"templates/cronjob.yaml"})
     for _, job := range testlib.SplitAndRenderCronJob(t, output, 2) {
-        assert.Equal(t, job.Spec.JobTemplate.Spec.Template.Spec.RestartPolicy, v1.RestartPolicyOnFailure)
+        assert.Equal(t, job.Spec.JobTemplate.Spec.Template.Spec.RestartPolicy, corev1.RestartPolicyOnFailure)
     }
 }
@@ -341,7 +342,7 @@ func TestDatabaseBackupCronJobRestartPolicyOverride(t *testing.T) {
     output := helm.RenderTemplate(t, options, helmChartPath, "release-name", []string{"templates/cronjob.yaml"})
     for _, job := range testlib.SplitAndRenderCronJob(t, output, 2) {
-        assert.Equal(t, job.Spec.JobTemplate.Spec.Template.Spec.RestartPolicy, v1.RestartPolicyNever)
+        assert.Equal(t, job.Spec.JobTemplate.Spec.Template.Spec.RestartPolicy, corev1.RestartPolicyNever)
     }
 }
diff --git a/test/integration/template_collector_test.go b/test/integration/template_collector_test.go
index 524e493dc..039f12411 100644
--- a/test/integration/template_collector_test.go
+++ b/test/integration/template_collector_test.go
@@ -5,17 +5,15 @@ import (
     "strings"
     "testing"

+    "github.com/gruntwork-io/terratest/modules/helm"
     "github.com/stretchr/testify/assert"
     "github.com/stretchr/testify/require"
-
-    v1 "k8s.io/api/core/v1"
-
-    "github.com/gruntwork-io/terratest/modules/helm"
+    corev1 "k8s.io/api/core/v1"

     "github.com/nuodb/nuodb-helm-charts/v3/test/testlib"
 )

-func checkSidecarContainers(t *testing.T, containers []v1.Container, options *helm.Options, chartPath string) {
+func checkSidecarContainers(t *testing.T, containers []corev1.Container, options *helm.Options, chartPath string) {
     require.NotEmpty(t, containers)
     found := 0
     securityContextEnabled := options.SetValues["admin.securityContext.enabledOnContainer"] == "true" ||
@@ -38,21 +36,21 @@ func checkSidecarContainers(t *testing.T, containers []v1.Container, options *he
                 options.SetValues["nuocollector.watcher.registry"],
                 options.SetValues["nuocollector.watcher.repository"],
                 options.SetValues["nuocollector.watcher.tag"]))
-            assert.Contains(t, container.Env, v1.EnvVar{
+            assert.Contains(t, container.Env, corev1.EnvVar{
                 Name:  "FOLDER",
                 Value: "/etc/telegraf/telegraf.d/dynamic/",
             })
-            assert.Contains(t, container.Env, v1.EnvVar{
+            assert.Contains(t, container.Env, corev1.EnvVar{
                 Name:  "REQ_URL",
                 Value: "http://127.0.0.1:5000/reload",
             })
             if chartPath == testlib.ADMIN_HELM_CHART_PATH {
-                assert.Contains(t, container.Env, v1.EnvVar{
+                assert.Contains(t, container.Env, corev1.EnvVar{
                     Name:  "LABEL",
                     Value: "nuodb.com/nuocollector-plugin in (release-name-nuodb-cluster0-admin, insights)",
                 })
             } else {
-                assert.Contains(t, container.Env, v1.EnvVar{
+                assert.Contains(t, container.Env, corev1.EnvVar{
                     Name:  "LABEL",
                     Value: "nuodb.com/nuocollector-plugin in (release-name-nuodb-cluster0-demo-database, insights)",
                 })
@@ -61,18 +59,18 @@ func checkSidecarContainers(t *testing.T, containers []v1.Container, options *he
             // This is probably the main container
             continue
         }
-        assert.Contains(t, container.VolumeMounts, v1.VolumeMount{
+        assert.Contains(t, container.VolumeMounts, corev1.VolumeMount{
             Name:      "eph-volume",
             MountPath: "/etc/telegraf/telegraf.d/dynamic/",
             SubPath:   "telegraf",
         })
         if logPersistenceEnabled {
-            assert.Contains(t, container.VolumeMounts, v1.VolumeMount{
+            assert.Contains(t, container.VolumeMounts, corev1.VolumeMount{
                 Name:      "log-volume",
                 MountPath: "/var/log/nuodb",
             })
         } else {
-            assert.Contains(t, container.VolumeMounts, v1.VolumeMount{
+            assert.Contains(t, container.VolumeMounts, corev1.VolumeMount{
                 Name:      "eph-volume",
                 MountPath: "/var/log/nuodb",
                 SubPath:   "log",
@@ -96,7 +94,7 @@ func checkSidecarContainers(t *testing.T, containers []v1.Container, options *he
     assert.Equal(t, expectedContainersCount, found)
 }

-func checkSpecVolumes(t *testing.T, volumes []v1.Volume, options *helm.Options, chartPath string) {
+func checkSpecVolumes(t *testing.T, volumes []corev1.Volume, options *helm.Options, chartPath string) {
     if options.SetValues["nuocollector.enabled"] == "false" {
         return
     }
@@ -109,7 +107,7 @@ func checkSpecVolumes(t *testing.T, volumes []v1.Volume, options *helm.Options,
     assert.Fail(t, "eph-volume should be declared as volume")
 }

-func checkPluginsRendered(t *testing.T, configMaps []v1.ConfigMap, options *helm.Options, chartPath string, expectedNrPlugins int) {
+func checkPluginsRendered(t *testing.T, configMaps []corev1.ConfigMap, options *helm.Options, chartPath string, expectedNrPlugins int) {
     found := 0
     for _, cm := range configMaps {
         if labelValue, ok := cm.Labels["nuodb.com/nuocollector-plugin"]; ok {
diff --git a/test/integration/template_database_restore_test.go b/test/integration/template_database_restore_test.go
index 500e6d685..a7ca9e145 100644
--- a/test/integration/template_database_restore_test.go
+++ b/test/integration/template_database_restore_test.go
@@ -3,10 +3,10 @@ package integration
 import (
     "testing"

+    "github.com/gruntwork-io/terratest/modules/helm"
     "github.com/stretchr/testify/assert"
     "github.com/stretchr/testify/require"

-    "github.com/gruntwork-io/terratest/modules/helm"
     "github.com/nuodb/nuodb-helm-charts/v3/test/testlib"
 )
diff --git a/test/integration/template_database_test.go b/test/integration/template_database_test.go
index 01ba988f7..a8de7df9a 100644
--- a/test/integration/template_database_test.go
+++ b/test/integration/template_database_test.go
@@ -6,16 +6,15 @@ import (
     "strings"
     "testing"

+    "github.com/gruntwork-io/terratest/modules/helm"
+    "github.com/gruntwork-io/terratest/modules/k8s"
     "github.com/stretchr/testify/assert"
     "github.com/stretchr/testify/require"
-    appsv1 "k8s.io/api/apps/v1"
-    v1 "k8s.io/api/core/v1"
+    corev1 "k8s.io/api/core/v1"
     "k8s.io/apimachinery/pkg/api/resource"
     metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"

-    "github.com/gruntwork-io/terratest/modules/helm"
-    "github.com/gruntwork-io/terratest/modules/k8s"
     "github.com/nuodb/nuodb-helm-charts/v3/test/testlib"
 )
@@ -126,7 +125,7 @@ func TestDatabaseClusterServiceRenders(t *testing.T) {
     for _, obj := range testlib.SplitAndRenderService(t, output, 1) {
         // Only the ClusterIP service targeting only TEs in this TE group will
         // be rendered by default
-        assert.Equal(t, v1.ServiceTypeClusterIP, obj.Spec.Type)
+        assert.Equal(t, corev1.ServiceTypeClusterIP, obj.Spec.Type)
         assert.Empty(t, obj.Spec.ClusterIP)
         assert.Equal(t, "te", obj.Spec.Selector["component"])
         assert.Equal(t, "release-name-nuodb-cluster0-demo-database", obj.Spec.Selector["app"])
@@ -147,7 +146,7 @@ func TestDatabaseClusterDirectServiceRenders(t *testing.T) {
     output := helm.RenderTemplate(t, options, helmChartPath, "release-name", []string{"templates/service-clusterip.yaml"})

     for _, obj := range testlib.SplitAndRenderService(t, output, 2) {
-        assert.Equal(t, v1.ServiceTypeClusterIP, obj.Spec.Type)
+        assert.Equal(t, corev1.ServiceTypeClusterIP, obj.Spec.Type)
         assert.Empty(t, obj.Spec.ClusterIP)

         if obj.Name == "demo-clusterip" {
@@ -174,7 +173,7 @@ func TestDatabaseHeadlessServiceRenders(t *testing.T) {

     for _, obj := range testlib.SplitAndRenderService(t, output, 1) {
         assert.Equal(t, "demo", obj.Name)
-        assert.Equal(t, v1.ServiceTypeClusterIP, obj.Spec.Type)
+        assert.Equal(t, corev1.ServiceTypeClusterIP, obj.Spec.Type)
         assert.Equal(t, "te", obj.Spec.Selector["component"])
         assert.Equal(t, "None", obj.Spec.ClusterIP)
     }
@@ -197,7 +196,7 @@ func TestDatabaseServiceRenders(t *testing.T) {

     for _, obj := range testlib.SplitAndRenderService(t, output, 1) {
         assert.Equal(t, "release-name-nuodb-cluster0-demo-database-balancer", obj.Name)
-        assert.Equal(t, v1.ServiceTypeLoadBalancer, obj.Spec.Type)
+        assert.Equal(t, corev1.ServiceTypeLoadBalancer, obj.Spec.Type)
         assert.Equal(t, "release-name-nuodb-cluster0-demo-database", obj.Spec.Selector["app"])
         assert.Equal(t, "te", obj.Spec.Selector["component"])
         assert.Contains(t, obj.Annotations, "service.beta.kubernetes.io/aws-load-balancer-internal")
@@ -209,7 +208,7 @@ func TestDatabaseServiceRenders(t *testing.T) {

     for _, obj := range testlib.SplitAndRenderService(t, output, 1) {
         assert.Equal(t, "release-name-nuodb-cluster0-demo-database-balancer", obj.Name)
-        assert.Equal(t, v1.ServiceTypeLoadBalancer, obj.Spec.Type)
+        assert.Equal(t, corev1.ServiceTypeLoadBalancer, obj.Spec.Type)
         assert.Empty(t, obj.Spec.ClusterIP)
         assert.Equal(t, obj.Annotations["service.beta.kubernetes.io/aws-load-balancer-type"], "external")
         assert.Equal(t, obj.Annotations["service.beta.kubernetes.io/aws-load-balancer-nlb-target-type"], "ip")
@@ -221,7 +220,7 @@ func TestDatabaseServiceRenders(t *testing.T) {
     output = helm.RenderTemplate(t, options, helmChartPath, "release-name", []string{"templates/service.yaml"})
     for _, obj := range testlib.SplitAndRenderService(t, output, 1) {
         assert.Equal(t, "release-name-nuodb-cluster0-demo-database-balancer", obj.Name)
-        assert.Equal(t, v1.ServiceTypeLoadBalancer, obj.Spec.Type)
+        assert.Equal(t, corev1.ServiceTypeLoadBalancer, obj.Spec.Type)
         assert.Equal(t, obj.Annotations["service.beta.kubernetes.io/aws-load-balancer-name"], "nuodb-demo-nlb")
         assert.NotContains(t, obj.Annotations, "service.beta.kubernetes.io/aws-load-balancer-scheme")
     }
@@ -246,7 +245,7 @@ func TestDatabaseNodePortServiceRenders(t *testing.T) {

     for _, obj := range testlib.SplitAndRenderService(t, output, 1) {
         assert.Equal(t, "release-name-nuodb-cluster0-demo-database-nodeport", obj.Name)
-        assert.Equal(t, v1.ServiceTypeNodePort, obj.Spec.Type)
+        assert.Equal(t, corev1.ServiceTypeNodePort, obj.Spec.Type)
         assert.Equal(t, "release-name-nuodb-cluster0-demo-database", obj.Spec.Selector["app"])
         assert.Equal(t, "te", obj.Spec.Selector["component"])
         assert.NotContains(t, obj.Annotations, "service.beta.kubernetes.io/aws-load-balancer-internal")
@@ -410,7 +409,7 @@ func TestDatabaseVolumes(t *testing.T) {
     // Path to the helm chart we will test
     helmChartPath := testlib.DATABASE_HELM_CHART_PATH

-    findEphemeralVolume := func(volumes []v1.Volume) *v1.Volume {
+    findEphemeralVolume := func(volumes []corev1.Volume) *corev1.Volume {
         for _, volume := range volumes {
             if volume.Name == "eph-volume" {
                 return &volume
@@ -420,7 +419,7 @@ func TestDatabaseVolumes(t *testing.T) {
     }

     // Returns a map of mount point to subpath for all eph-volume mounts
-    findEphemeralVolumeMounts := func(mounts []v1.VolumeMount) map[string]string {
+    findEphemeralVolumeMounts := func(mounts []corev1.VolumeMount) map[string]string {
         ret := make(map[string]string)
         for _, mount := range mounts {
             if mount.Name == "eph-volume" {
@@ -430,7 +429,7 @@ func TestDatabaseVolumes(t *testing.T) {
         return ret
     }

-    assertStorageEquals := func(t *testing.T, volume *v1.Volume, size string) {
+    assertStorageEquals := func(t *testing.T, volume *corev1.Volume, size string) {
         quantity, err := resource.ParseQuantity(size)
         assert.NoError(t, err)
         assert.Equal(t, volume.Ephemeral.VolumeClaimTemplate.Spec.Resources.Requests.Storage(), &quantity)
@@ -811,7 +810,7 @@ func TestDatabaseOtherOptions(t *testing.T) {
         assert.NotContains(t, args, "")
     }

-    basicEnvChecks := func(args []v1.EnvVar) {
+    basicEnvChecks := func(args []corev1.EnvVar) {
         assert.True(t, testlib.EnvContains(args, "NUODOCKER_KEYSTORE_PASSWORD", "changeIt"))
     }
@@ -854,9 +853,9 @@ func TestDatabaseCustomEnv(t *testing.T) {
         ValuesFiles: []string{"../files/database-env.yaml"},
     }

-    basicEnvChecks := func(args []v1.EnvVar) {
-        expectedAltAddress := v1.EnvVarSource{
-            FieldRef: &v1.ObjectFieldSelector{
+    basicEnvChecks := func(args []corev1.EnvVar) {
+        expectedAltAddress := corev1.EnvVarSource{
+            FieldRef: &corev1.ObjectFieldSelector{
                 FieldPath: "status.podIP",
             },
         }
@@ -897,8 +896,8 @@ func TestDatabaseVPNRenders(t *testing.T) {
         },
     }

-    basicChecks := func(args []v1.Container) {
-        assert.Contains(t, args[0].SecurityContext.Capabilities.Add, v1.Capability("NET_ADMIN"))
+    basicChecks := func(args []corev1.Container) {
+        assert.Contains(t, args[0].SecurityContext.Capabilities.Add, corev1.Capability("NET_ADMIN"))
         assert.True(t, testlib.EnvFromSourceContains(args[0].EnvFrom, "test-config"))
     }
@@ -989,7 +988,7 @@ func TestReadinessProbe(t *testing.T) {
         },
     }

-    basicChecks := func(spec v1.PodSpec) {
+    basicChecks := func(spec corev1.PodSpec) {
         container := spec.Containers[0]
         require.NotNil(t, container.ReadinessProbe)
         require.NotNil(t, container.ReadinessProbe.Exec)
@@ -1035,7 +1034,7 @@ func TestReadinessProbeFallback(t *testing.T) {
         },
     }

-    basicChecks := func(spec v1.PodSpec) {
+    basicChecks := func(spec corev1.PodSpec) {
         container := spec.Containers[0]
         require.NotNil(t, container.ReadinessProbe)
         require.NotNil(t, container.ReadinessProbe.Exec)
@@ -1074,7 +1073,7 @@ func TestReadinessProbeDefault(t *testing.T) {
         SetValues: map[string]string{},
     }

-    basicChecks := func(spec v1.PodSpec) {
+    basicChecks := func(spec corev1.PodSpec) {
         container := spec.Containers[0]
         require.NotNil(t, container.ReadinessProbe)
         require.NotNil(t, container.ReadinessProbe.Exec)
@@ -1356,7 +1355,7 @@ func TestDatabaseSeparateJournal(t *testing.T) {
             claim, ok := testlib.GetVolumeClaim(obj.Spec.VolumeClaimTemplates, "journal-volume")
             assert.True(t, ok, "volume journal-volume not found")
-            assert.Equal(t, v1.ReadWriteOnce, claim.Spec.AccessModes[0])
+            assert.Equal(t, corev1.ReadWriteOnce, claim.Spec.AccessModes[0])
             assert.Nil(t, claim.Spec.StorageClassName)
         }
     })
@@ -1387,7 +1386,7 @@ func TestDatabaseSeparateJournal(t *testing.T) {
             claim, ok := testlib.GetVolumeClaim(obj.Spec.VolumeClaimTemplates, "journal-volume")
             assert.True(t, ok, "volume journal-volume not found")
-            assert.Equal(t, v1.ReadWriteMany, claim.Spec.AccessModes[0])
+            assert.Equal(t, corev1.ReadWriteMany, claim.Spec.AccessModes[0])
             assert.EqualValues(t, "non-default", *claim.Spec.StorageClassName)
         }
     })
@@ -1732,13 +1731,13 @@ func TestDatabaseSecurityContext(t *testing.T) {
         },
     }

-    checkContainer := func(t *testing.T, container v1.Container) {
+    checkContainer := func(t *testing.T, container corev1.Container) {
         containerSecurityContext := container.SecurityContext
         assert.NotNil(t, containerSecurityContext)
         assert.True(t, *containerSecurityContext.ReadOnlyRootFilesystem)

         // Check that /tmp directory has ephemeral volume mounted to it
-        var tmpVolumeMount *v1.VolumeMount
+        var tmpVolumeMount *corev1.VolumeMount
         for _, volumeMount := range container.VolumeMounts {
             if volumeMount.MountPath == "/tmp" {
                 tmpVolumeMount = volumeMount.DeepCopy()
@@ -1774,7 +1773,7 @@ func TestDatabaseSecurityContext(t *testing.T) {
         for _, obj := range testlib.SplitAndRenderStatefulSet(t, output, 2) {
             containerSecurityContext := obj.Spec.Template.Spec.Containers[0].SecurityContext
             assert.NotNil(t, containerSecurityContext)
-            assert.Contains(t, containerSecurityContext.Capabilities.Add, v1.Capability("NET_ADMIN"))
+            assert.Contains(t, containerSecurityContext.Capabilities.Add, corev1.Capability("NET_ADMIN"))
             assert.Nil(t, containerSecurityContext.Capabilities.Drop)
         }
         // check security context on TE Deployment
@@ -1782,7 +1781,7 @@ func TestDatabaseSecurityContext(t *testing.T) {
         for _, obj := range testlib.SplitAndRenderDeployment(t, output, 1) {
             containerSecurityContext := obj.Spec.Template.Spec.Containers[0].SecurityContext
             assert.NotNil(t, containerSecurityContext)
-            assert.Contains(t, containerSecurityContext.Capabilities.Add, v1.Capability("NET_ADMIN"))
+            assert.Contains(t, containerSecurityContext.Capabilities.Add, corev1.Capability("NET_ADMIN"))
             assert.Nil(t, containerSecurityContext.Capabilities.Drop)
         }
     })
@@ -1799,7 +1798,7 @@ func TestDatabaseSecurityContext(t *testing.T) {
         for _, obj := range testlib.SplitAndRenderStatefulSet(t, output, 2) {
             containerSecurityContext := obj.Spec.Template.Spec.Containers[0].SecurityContext
             assert.NotNil(t, containerSecurityContext)
-            assert.Contains(t, containerSecurityContext.Capabilities.Drop, v1.Capability("CAP_NET_RAW"))
+            assert.Contains(t, containerSecurityContext.Capabilities.Drop, corev1.Capability("CAP_NET_RAW"))
             assert.Nil(t, containerSecurityContext.Capabilities.Add)
         }
         // check security context on TE Deployment
@@ -1807,7 +1806,7 @@ func TestDatabaseSecurityContext(t *testing.T) {
         for _, obj := range testlib.SplitAndRenderDeployment(t, output, 1) {
             containerSecurityContext := obj.Spec.Template.Spec.Containers[0].SecurityContext
             assert.NotNil(t, containerSecurityContext)
-            assert.Contains(t, containerSecurityContext.Capabilities.Drop, v1.Capability("CAP_NET_RAW"))
+            assert.Contains(t, containerSecurityContext.Capabilities.Drop, corev1.Capability("CAP_NET_RAW"))
             assert.Nil(t, containerSecurityContext.Capabilities.Add)
         }
     })
@@ -2447,12 +2446,12 @@ func TestDatabaseConfigChecksum(t *testing.T) {
     })
 }

-func verifyTopologyConstraints(t *testing.T, name string, obj v1.PodSpec, expectedLabels map[string]string) {
+func verifyTopologyConstraints(t *testing.T, name string, obj corev1.PodSpec, expectedLabels map[string]string) {
     require.Equal(t, 1, len(obj.TopologySpreadConstraints))
     constraint := obj.TopologySpreadConstraints[0]
     assert.Equal(t, int32(1), constraint.MaxSkew)
     assert.Equal(t, "topology.kubernetes.io/zone", constraint.TopologyKey)
-    assert.Equal(t, v1.DoNotSchedule, constraint.WhenUnsatisfiable)
+    assert.Equal(t, corev1.DoNotSchedule, constraint.WhenUnsatisfiable)
     msg, ok := testlib.MapContains(constraint.LabelSelector.MatchLabels, expectedLabels)
     assert.Truef(t, ok, "Unexpected labels in topologySpreadConstraints for resource %s: %s", name, msg)
 }
@@ -2697,9 +2696,9 @@ func TestDatabaseTLSConfig(t *testing.T) {
     for _, obj := range testlib.SplitAndRenderStatefulSet(t, output, 2) {
         verifyTLSSecrets(t, obj.Spec.Template.Spec, options)
         assert.True(t, testlib.EnvContainsValueFrom(obj.Spec.Template.Spec.Containers[0].Env,
-            "NUODOCKER_KEYSTORE_PASSWORD", &v1.EnvVarSource{
-                SecretKeyRef: &v1.SecretKeySelector{
-                    LocalObjectReference: v1.LocalObjectReference{
+            "NUODOCKER_KEYSTORE_PASSWORD", &corev1.EnvVarSource{
+                SecretKeyRef: &corev1.SecretKeySelector{
+                    LocalObjectReference: corev1.LocalObjectReference{
                         Name: options.SetValues["admin.tlsKeyStore.secret"],
                     },
                     Key: "password",
@@ -2712,9 +2711,9 @@ func TestDatabaseTLSConfig(t *testing.T) {
     for _, obj := range testlib.SplitAndRenderDeployment(t, output, 1) {
         verifyTLSSecrets(t, obj.Spec.Template.Spec, options)
         assert.True(t, testlib.EnvContainsValueFrom(obj.Spec.Template.Spec.Containers[0].Env,
-            "NUODOCKER_KEYSTORE_PASSWORD", &v1.EnvVarSource{
-                SecretKeyRef: &v1.SecretKeySelector{
-                    LocalObjectReference: v1.LocalObjectReference{
+            "NUODOCKER_KEYSTORE_PASSWORD", &corev1.EnvVarSource{
+                SecretKeyRef: &corev1.SecretKeySelector{
+                    LocalObjectReference: corev1.LocalObjectReference{
                         Name: options.SetValues["admin.tlsKeyStore.secret"],
                     },
                     Key: "password",
@@ -2741,9 +2740,9 @@ func TestDatabaseTLSConfig(t *testing.T) {
     for _, obj := range testlib.SplitAndRenderStatefulSet(t, output, 2) {
         verifyTLSSecrets(t, obj.Spec.Template.Spec, options)
         assert.True(t, testlib.EnvContainsValueFrom(obj.Spec.Template.Spec.Containers[0].Env,
-            "NUODOCKER_KEYSTORE_PASSWORD", &v1.EnvVarSource{
-                SecretKeyRef: &v1.SecretKeySelector{
-                    LocalObjectReference: v1.LocalObjectReference{
+            "NUODOCKER_KEYSTORE_PASSWORD", &corev1.EnvVarSource{
+                SecretKeyRef: &corev1.SecretKeySelector{
+                    LocalObjectReference: corev1.LocalObjectReference{
                         Name: options.SetValues["admin.tlsKeyStore.secret"],
                     },
                     Key: options.SetValues["admin.tlsKeyStore.passwordKey"],
@@ -2756,9 +2755,9 @@ func TestDatabaseTLSConfig(t *testing.T) {
     for _, obj := range testlib.SplitAndRenderDeployment(t, output, 1) {
         verifyTLSSecrets(t, obj.Spec.Template.Spec, options)
         assert.True(t, testlib.EnvContainsValueFrom(obj.Spec.Template.Spec.Containers[0].Env,
-            "NUODOCKER_KEYSTORE_PASSWORD", &v1.EnvVarSource{
-                SecretKeyRef: &v1.SecretKeySelector{
-                    LocalObjectReference: v1.LocalObjectReference{
+            "NUODOCKER_KEYSTORE_PASSWORD", &corev1.EnvVarSource{
+                SecretKeyRef: &corev1.SecretKeySelector{
+                    LocalObjectReference: corev1.LocalObjectReference{
                         Name: options.SetValues["admin.tlsKeyStore.secret"],
                     },
                     Key: options.SetValues["admin.tlsKeyStore.passwordKey"],
@@ -2917,7 +2916,7 @@ func TestDatabaseStatefulSetBackupHooksSidecar(t *testing.T) {
         },
     }
     output := helm.RenderTemplate(t, options, helmChartPath, "release-name", []string{"templates/statefulset.yaml"})
-    var sidecar *v1.Container
+    var sidecar *corev1.Container
     for _, obj := range testlib.SplitAndRenderStatefulSet(t, output, 2) {
         for _, container := range obj.Spec.Template.Spec.Containers {
             if container.Name == "backup-hooks" {
@@ -2959,7 +2958,7 @@ func TestDatabaseStatefulSetBackupHooksSidecar(t *testing.T) {
     assert.Contains(t, sidecar.Image, "docker.io/nuodb/nuodb")

     // Check that configmap for backup hooks was rendered
-    var backupHooksCm *v1.ConfigMap
+    var backupHooksCm *corev1.ConfigMap
     output = helm.RenderTemplate(t, options, helmChartPath, "release-name", []string{"templates/configmap.yaml"})
     for _, cm := range testlib.SplitAndRenderConfigMap(t, output, 6) {
         if strings.HasSuffix(cm.Name, "backup-hooks") {
@@ -2985,7 +2984,7 @@ func TestDatabaseStatefulSetBackupHooksSidecar(t *testing.T) {
         },
     }
     output := helm.RenderTemplate(t, options, helmChartPath, "release-name", []string{"templates/statefulset.yaml"})
-    var sidecar *v1.Container
+    var sidecar *corev1.Container
     for _, obj := range testlib.SplitAndRenderStatefulSet(t, output, 2) {
         for _, container := range obj.Spec.Template.Spec.Containers {
             if container.Name == "backup-hooks" {
@@ -3027,7 +3026,7 @@ func TestDatabaseStatefulSetBackupHooksSidecar(t *testing.T) {
     assert.Contains(t, sidecar.Image, "docker.io/library/python:3.12-slim")

     // Check that configmap for backup hooks was rendered
-    var backupHooksCm *v1.ConfigMap
+    var backupHooksCm *corev1.ConfigMap
     output = helm.RenderTemplate(t, options, helmChartPath, "release-name", []string{"templates/configmap.yaml"})
     for _, cm := range testlib.SplitAndRenderConfigMap(t, output, 6) {
         if strings.HasSuffix(cm.Name, "backup-hooks") {
@@ -3054,7 +3053,7 @@ func TestDatabaseStatefulSetBackupHooksSidecar(t *testing.T) {
         },
     }
     output := helm.RenderTemplate(t, options, helmChartPath, "release-name", []string{"templates/statefulset.yaml"})
-    var sidecar *v1.Container
+    var sidecar *corev1.Container
     for _, obj := range testlib.SplitAndRenderStatefulSet(t, output, 2) {
         for _, container := range obj.Spec.Template.Spec.Containers {
             if container.Name == "backup-hooks" {
@@ -3093,7 +3092,7 @@ func TestDatabaseStatefulSetBackupHooksSidecar(t *testing.T) {
         },
     }
     output := helm.RenderTemplate(t, options, helmChartPath, "release-name", []string{"templates/statefulset.yaml"})
-    var sidecar *v1.Container
+    var sidecar *corev1.Container
    for _, obj := range testlib.SplitAndRenderStatefulSet(t, output, 2) {
         for _, container := range obj.Spec.Template.Spec.Containers {
             if container.Name == "backup-hooks" {
@@ -3139,7 +3138,7 @@ func TestDatabaseStatefulSetBackupHooksSidecar(t *testing.T) {
         },
     }
     output := helm.RenderTemplate(t, options, helmChartPath, "release-name", []string{"templates/statefulset.yaml"})
-    var sidecar *v1.Container
+    var sidecar *corev1.Container
     for _, obj := range testlib.SplitAndRenderStatefulSet(t, output, 2) {
         for _, container := range obj.Spec.Template.Spec.Containers {
             if container.Name == "backup-hooks" {
diff --git a/test/integration/template_resources_test.go b/test/integration/template_resources_test.go
index 9f306fdab..ef427a52f 100644
--- a/test/integration/template_resources_test.go
+++ b/test/integration/template_resources_test.go
@@ -6,12 +6,11 @@ import (
     "strings"
     "testing"

+    "github.com/gruntwork-io/terratest/modules/helm"
     "github.com/stretchr/testify/assert"
     "github.com/stretchr/testify/require"
-
     "k8s.io/apimachinery/pkg/api/resource"
-
-    "github.com/gruntwork-io/terratest/modules/helm"
     "github.com/nuodb/nuodb-helm-charts/v3/test/testlib"
 )
diff --git a/test/integration/template_restore_test.go b/test/integration/template_restore_test.go
index aa88922a9..afd984ecb 100644
--- a/test/integration/template_restore_test.go
+++ b/test/integration/template_restore_test.go
@@ -5,11 +5,11 @@ import (
     "strings"
     "testing"

+    "github.com/gruntwork-io/terratest/modules/helm"
     "github.com/stretchr/testify/assert"
     "github.com/stretchr/testify/require"
     metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"

-    "github.com/gruntwork-io/terratest/modules/helm"
     "github.com/nuodb/nuodb-helm-charts/v3/test/testlib"
 )
diff --git a/test/integration/template_storage_class_test.go b/test/integration/template_storage_class_test.go
index a566f870e..ee6aff2c7 100644
--- a/test/integration/template_storage_class_test.go
+++ b/test/integration/template_storage_class_test.go
@@ -2,8 +2,6 @@ package integration

 import (
     "fmt"
-    "github.com/nuodb/nuodb-helm-charts/v3/test/testlib"
-    "github.com/stretchr/testify/assert"
     "math/rand"
     "regexp"
     "strconv"
@@ -11,6 +9,9 @@ import (
     "time"

     "github.com/gruntwork-io/terratest/modules/helm"
+    "github.com/stretchr/testify/assert"
+
+    "github.com/nuodb/nuodb-helm-charts/v3/test/testlib"
 )

 func init() {
@@ -105,7 +106,6 @@ func TestStorageClassTemplateGcp(t *testing.T) {
     StorageClassTemplateE(t, options, expectedProvisioner)
 }

-
 func TestStorageClassTemplateLocal(t *testing.T) {
     options := &helm.Options{}
@@ -118,4 +118,4 @@ func TestStorageClassTemplateLocal(t *testing.T) {
     for _, obj := range testlib.SplitAndRenderStorageClass(t, output, 1) {
         assert.EqualValues(t, "kubernetes.io/no-provisioner", obj.Provisioner)
     }
-}
\ No newline at end of file
+}
diff --git a/test/integration/template_ycsb_test.go b/test/integration/template_ycsb_test.go
index 759237d85..5b9f721a3 100644
--- a/test/integration/template_ycsb_test.go
+++ b/test/integration/template_ycsb_test.go
@@ -1,15 +1,14 @@
 package integration

 import (
-    "github.com/nuodb/nuodb-helm-charts/v3/test/testlib"
-    "github.com/stretchr/testify/assert"
     "strings"
     "testing"
-
-    v1 "k8s.io/api/core/v1"
-
     "github.com/gruntwork-io/terratest/modules/helm"
+    "github.com/stretchr/testify/assert"
+    corev1 "k8s.io/api/core/v1"
+
+    "github.com/nuodb/nuodb-helm-charts/v3/test/testlib"
 )

 func TestYcsbConfigMapRenders(t *testing.T) {
@@ -35,7 +34,7 @@ func TestYcsbConfigMapRenders(t *testing.T) {
             continue
         }

-        var object v1.ConfigMap
+        var object corev1.ConfigMap
         helm.UnmarshalK8SYaml(t, part, &object)

         configMapCount += 1
@@ -81,4 +80,4 @@ func TestYcsbRCReplicas(t *testing.T) {
         assert.Equal(t, "ycsb-load", obj.Name)
         assert.EqualValues(t, 1, *obj.Spec.Replicas)
     }
-}
\ No newline at end of file
+}
diff --git a/test/minikube/minikube_base_admin_test.go b/test/minikube/minikube_base_admin_test.go
index 8af24c9bb..5ae002912 100644
--- a/test/minikube/minikube_base_admin_test.go
+++ b/test/minikube/minikube_base_admin_test.go
@@ -23,7 +23,6 @@ import (
 )

 func TestKubernetesBasicAdminSingleReplica(t *testing.T) {
-    testlib.AwaitTillerUp(t)
     defer testlib.VerifyTeardown(t)

     options := helm.Options{
@@ -65,7 +64,6 @@ func TestKubernetesAdminLicenseSecret(t *testing.T) {
     if os.Getenv("NUODB_LICENSE_CONTENT") == "" {
         t.Skip("Cannot run this test without a valid license")
     }
-    testlib.AwaitTillerUp(t)
     defer testlib.VerifyTeardown(t)

     randomSuffix := strings.ToLower(random.UniqueId())
@@ -122,7 +120,6 @@ func TestKubernetesAdminLicenseSecret(t *testing.T) {

 func TestKubernetesInvalidLicense(t *testing.T) {
     testlib.SkipTestOnNuoDBVersionCondition(t, ">= 6.0.0")
-    testlib.AwaitTillerUp(t)
     defer testlib.VerifyTeardown(t)

     licenseString := "red-riding-hood"
@@ -156,7 +153,6 @@ func TestKubernetesInvalidLicense(t *testing.T) {
 }

 func TestKubernetesBasicNameOverride(t *testing.T) {
-    testlib.AwaitTillerUp(t)
     defer testlib.VerifyTeardown(t)

     options := &helm.Options{
@@ -174,7 +170,6 @@ func TestKubernetesBasicNameOverride(t *testing.T) {
 }

 func TestKubernetesFullNameOverride(t *testing.T) {
-    testlib.AwaitTillerUp(t)
     defer testlib.VerifyTeardown(t)

     nonDefaultName := "nondefault-adminname"
diff --git a/test/minikube/minikube_base_collector_test.go b/test/minikube/minikube_base_collector_test.go
index 64af6e017..36bfbf97c 100644
--- a/test/minikube/minikube_base_collector_test.go
+++ b/test/minikube/minikube_base_collector_test.go
@@ -1,3 +1,4 @@
+//go:build long
 // +build long

 package minikube
@@ -7,12 +8,12 @@ import (
     "testing"
     "time"

-    "github.com/nuodb/nuodb-helm-charts/v3/test/testlib"
-
     "github.com/gruntwork-io/terratest/modules/helm"
     "github.com/gruntwork-io/terratest/modules/k8s"
-    v12 "k8s.io/api/core/v1"
+    corev1 "k8s.io/api/core/v1"
     metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+
+    "github.com/nuodb/nuodb-helm-charts/v3/test/testlib"
 )

 const FILE_PLUGIN_CONFIGMAP = `
@@ -39,7 +40,7 @@ func createOutputFilePlugin(t *testing.T, namespaceName string) {

 func checkMetricsLine(t *testing.T, namespaceName string, podName string, expectedLine string, minOccurances int) bool {
-    count := testlib.GetRegexOccurrenceInLog(t, namespaceName, podName, expectedLine, &v12.PodLogOptions{Container: "nuocollector"})
+    count := testlib.GetRegexOccurrenceInLog(t, namespaceName, podName, expectedLine, &corev1.PodLogOptions{Container: "nuocollector"})
     if count >= minOccurances {
         t.Logf("Found %d occurances of '%s' in pod %s log", count, expectedLine, podName)
         return true
@@ -96,7 +97,6 @@ func verifyCollectionForDatabase(t *testing.T, namespaceName string, app string,
 }

 func TestMetricsCollection(t *testing.T) {
-    testlib.AwaitTillerUp(t)
     defer testlib.VerifyTeardown(t)

     options := helm.Options{
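The +//go:build lines added in the test files above adopt the build-constraint syntax introduced in Go 1.17, while the legacy // +build comment is kept alongside it so older toolchains still honor the tag; gofmt keeps the two lines in sync. The pairing looks like this (using the "long" tag from this patch):

    //go:build long
    // +build long

    package minikube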
diff --git a/test/minikube/minikube_base_database_test.go b/test/minikube/minikube_base_database_test.go
index 01b3594b3..f1c0f7ca8 100644
--- a/test/minikube/minikube_base_database_test.go
+++ b/test/minikube/minikube_base_database_test.go
@@ -9,13 +9,12 @@ import (
     "testing"
     "time"

-    "github.com/stretchr/testify/require"
-
-    "github.com/nuodb/nuodb-helm-charts/v3/test/testlib"
-
     "github.com/gruntwork-io/terratest/modules/helm"
     "github.com/gruntwork-io/terratest/modules/k8s"
     "github.com/gruntwork-io/terratest/modules/random"
+    "github.com/stretchr/testify/require"
+
+    "github.com/nuodb/nuodb-helm-charts/v3/test/testlib"
 )

 const LABEL_CLOUD = "minikube"
@@ -153,7 +152,6 @@ func verifyEngineAltAddress(t *testing.T, namespaceName string, adminPod string,
 }

 func TestKubernetesBasicDatabase(t *testing.T) {
-    testlib.AwaitTillerUp(t)
     defer testlib.VerifyTeardown(t)

     options := helm.Options{}
@@ -189,7 +187,6 @@ func TestKubernetesBasicDatabase(t *testing.T) {
 }

 func TestSmVolumePermissionChange(t *testing.T) {
-    testlib.AwaitTillerUp(t)
     defer testlib.Teardown(testlib.TEARDOWN_ADMIN)

     options := helm.Options{}
@@ -240,7 +237,6 @@ func TestSmVolumePermissionChange(t *testing.T) {
 }

 func TestKubernetesAccessWithinPods(t *testing.T) {
-    testlib.AwaitTillerUp(t)

     options := helm.Options{}
@@ -280,7 +276,6 @@ func TestKubernetesAccessWithinPods(t *testing.T) {
 }

 func TestArchivePvcRecreated(t *testing.T) {
-    testlib.AwaitTillerUp(t)
     options := helm.Options{}

     defer testlib.Teardown(testlib.TEARDOWN_ADMIN)
     helmChartReleaseName, namespaceName := testlib.StartAdmin(t, &options, 1, "")
@@ -448,7 +443,6 @@ func TestHooksCustomHandlers(t *testing.T) {
 }

 func TestKubernetesAltAddress(t *testing.T) {
-    testlib.AwaitTillerUp(t)
     defer testlib.VerifyTeardown(t)

     options := helm.Options{}
@@ -479,7 +473,6 @@ func TestKubernetesAltAddress(t *testing.T) {
 }

 func TestKubernetesStartDatabaseShrinkedAdmin(t *testing.T) {
-    testlib.AwaitTillerUp(t)
     defer testlib.VerifyTeardown(t)

     options := helm.Options{
@@ -529,7 +522,6 @@ func TestKubernetesStartDatabaseShrinkedAdmin(t *testing.T) {
 }

 func TestKubernetesSeparateJournalLocation(t *testing.T) {
-    testlib.AwaitTillerUp(t)
     defer testlib.VerifyTeardown(t)

     options := helm.Options{}
@@ -563,7 +555,6 @@ func TestKubernetesSeparateJournalLocation(t *testing.T) {
 }

 func TestKubernetesRestrictedDatabase(t *testing.T) {
-    testlib.AwaitTillerUp(t)
     defer testlib.VerifyTeardown(t)

     options := helm.Options{
diff --git a/test/minikube/minikube_base_restore_test.go b/test/minikube/minikube_base_restore_test.go
index 40f92883f..0f20f99eb 100644
--- a/test/minikube/minikube_base_restore_test.go
+++ b/test/minikube/minikube_base_restore_test.go
@@ -10,16 +10,14 @@ import (
     "testing"
     "time"

-    "github.com/stretchr/testify/require"
-
-    "github.com/nuodb/nuodb-helm-charts/v3/test/testlib"
-
-    corev1 "k8s.io/api/core/v1"
-
-    "github.com/Masterminds/semver"
+    "github.com/Masterminds/semver/v3"
     "github.com/gruntwork-io/terratest/modules/helm"
     "github.com/gruntwork-io/terratest/modules/k8s"
     "github.com/gruntwork-io/terratest/modules/random"
+    "github.com/stretchr/testify/require"
+    corev1 "k8s.io/api/core/v1"
+
+    "github.com/nuodb/nuodb-helm-charts/v3/test/testlib"
 )

 func verifyBackup(t *testing.T, namespaceName string, podName string, databaseName string, options *helm.Options) {
@@ -37,7 +35,6 @@ func verifyBackup(t *testing.T, namespaceName string, podName string, databaseNa
 }

 func TestKubernetesBackupDatabase(t *testing.T) {
-    testlib.AwaitTillerUp(t)
     defer testlib.VerifyTeardown(t)

     adminOptions := helm.Options{}
@@ -87,7 +84,6 @@ func TestKubernetesBackupDatabase(t *testing.T) {
 }

 func TestKubernetesBackupHistory(t *testing.T) {
-    testlib.AwaitTillerUp(t)
     defer testlib.VerifyTeardown(t)

     adminOptions := helm.Options{}
@@ -150,7 +146,6 @@ func TestKubernetesJournalBackupSuspended(t *testing.T) {
         t.Skip("Cannot test multiple SMs without the Enterprise Edition")
     }
     testlib.SkipTestOnNuoDBVersionCondition(t, "< 4.3")
-    testlib.AwaitTillerUp(t)
     defer testlib.VerifyTeardown(t)

     defer testlib.Teardown(testlib.TEARDOWN_ADMIN)
@@ -315,7 +310,6 @@ func restoreDatabaseByArchiveType(t *testing.T, options helm.Options, namespaceN
 }

 func TestKubernetesRestoreDatabase(t *testing.T) {
-    testlib.AwaitTillerUp(t)
     defer testlib.VerifyTeardown(t)

     options := helm.Options{
@@ -353,7 +347,6 @@ func TestKubernetesRestoreDatabase(t *testing.T) {
 }

 func TestKubernetesImportDatabase(t *testing.T) {
-    testlib.AwaitTillerUp(t)
     defer testlib.VerifyTeardown(t)

     adminOptions := helm.Options{}
@@ -417,7 +410,6 @@ func TestKubernetesAutoRestore(t *testing.T) {
     if os.Getenv("NUODB_LICENSE") != "ENTERPRISE" && os.Getenv("NUODB_LICENSE_CONTENT") == "" {
         t.Skip("Cannot test autoRestore without the Enterprise Edition")
     }
-    testlib.AwaitTillerUp(t)
     defer testlib.VerifyTeardown(t)

     defer testlib.Teardown(testlib.TEARDOWN_ADMIN)
@@ -504,7 +496,6 @@ func TestKubernetesAutoRestore(t *testing.T) {
 }

 func TestSmRestartPartialSnapshotRestore(t *testing.T) {
-    testlib.AwaitTillerUp(t)
     defer testlib.VerifyTeardown(t)

     defer testlib.Teardown(testlib.TEARDOWN_ADMIN)
     // Create admin release
@@ -593,7 +584,6 @@ spec:
 // Test exercising backup hooks and volume snapshot restore
 func runTestKubernetesSnapshotRestore(t *testing.T, preprovisionVolumes bool, inPlaceRestore bool) {
-    testlib.AwaitTillerUp(t)
     defer testlib.VerifyTeardown(t)

     defer testlib.Teardown(testlib.TEARDOWN_ADMIN)
     // Create admin release
diff --git a/test/minikube/minikube_base_tde_test.go b/test/minikube/minikube_base_tde_test.go
index b85964d0c..e873d59e6 100644
--- a/test/minikube/minikube_base_tde_test.go
+++ b/test/minikube/minikube_base_tde_test.go
@@ -1,25 +1,23 @@
+//go:build long
 // +build long

 package minikube

 import (
     "fmt"
-    "io/ioutil"
     "os"
     "strings"
     "testing"
     "time"

+    "github.com/gruntwork-io/terratest/modules/helm"
+    "github.com/gruntwork-io/terratest/modules/k8s"
+    "github.com/gruntwork-io/terratest/modules/random"
     "github.com/stretchr/testify/assert"
     "github.com/stretchr/testify/require"
-
-    "github.com/nuodb/nuodb-helm-charts/v3/test/testlib"
-
     corev1 "k8s.io/api/core/v1"

-    "github.com/gruntwork-io/terratest/modules/helm"
-    "github.com/gruntwork-io/terratest/modules/k8s"
-    "github.com/gruntwork-io/terratest/modules/random"
+    "github.com/nuodb/nuodb-helm-charts/v3/test/testlib"
 )

 func applyStoragePasswordSecret(t *testing.T, namespaceName string, name string, passwords []string) {
@@ -33,7 +31,7 @@ func applyStoragePasswordSecret(t *testing.T, namespaceName string, name string,
     }
     secret, err := k8s.RunKubectlAndGetOutputE(t, opts, kubectlArgs...)
     require.NoError(t, err)
-    tmpfile, err := ioutil.TempFile("", "tde_secret")
+    tmpfile, err := os.CreateTemp("", "tde_secret")
     require.NoError(t, err)
     defer os.Remove(tmpfile.Name())
     _, err = tmpfile.WriteString(secret)
@@ -69,7 +67,6 @@ func TestAdminColdStartWithTDE(t *testing.T) {
     if os.Getenv("NUODB_DEV") != "true" {
         t.Skip("'tde_monitor' service is not supported in released versions")
     }
-    testlib.AwaitTillerUp(t)
     defer testlib.VerifyTeardown(t)

     options := helm.Options{
@@ -134,7 +131,6 @@ func TestAdminColdStartWithTDE(t *testing.T) {

 func TestRestoreInPlaceWithTDE(t *testing.T) {
     testlib.SkipTestOnNuoDBVersionCondition(t, "< 4.1.2")
-    testlib.AwaitTillerUp(t)
     defer testlib.VerifyTeardown(t)

     options := helm.Options{
diff --git a/test/minikube/minikube_base_thp_test.go b/test/minikube/minikube_base_thp_test.go
index 6be995560..fe46864fc 100644
--- a/test/minikube/minikube_base_thp_test.go
+++ b/test/minikube/minikube_base_thp_test.go
@@ -1,19 +1,19 @@
+//go:build short
 // +build short

 package minikube

 import (
     "fmt"
-    "time"
-
-    "github.com/nuodb/nuodb-helm-charts/v3/test/testlib"
-
     "strings"
     "testing"
+    "time"

     "github.com/gruntwork-io/terratest/modules/helm"
     "github.com/gruntwork-io/terratest/modules/k8s"
     "github.com/gruntwork-io/terratest/modules/random"
+
+    "github.com/nuodb/nuodb-helm-charts/v3/test/testlib"
 )

 func scheduleDefault(t *testing.T, helmChartPath string, namespaceName string) {
@@ -89,7 +89,6 @@ func scheduleLabelMismatch(t *testing.T, helmChartPath string, namespaceName str
 }

 func TestKubernetesDefaultMinikubeTHP(t *testing.T) {
-    testlib.AwaitTillerUp(t)
     defer testlib.VerifyTeardown(t)

     randomSuffix := strings.ToLower(random.UniqueId())
diff --git a/test/minikube/minikube_crash_handling_test.go b/test/minikube/minikube_crash_handling_test.go
index 2234780af..f895c928f 100644
--- a/test/minikube/minikube_crash_handling_test.go
+++ b/test/minikube/minikube_crash_handling_test.go
@@ -1,3 +1,4 @@
+//go:build diagnostics
 // +build diagnostics

 package minikube
@@ -5,18 +6,19 @@ package minikube
 import (
     "errors"
     "fmt"
-    "github.com/Masterminds/semver"
-    "github.com/gruntwork-io/terratest/modules/helm"
-    "github.com/gruntwork-io/terratest/modules/k8s"
-    "github.com/nuodb/nuodb-helm-charts/v3/test/testlib"
-    "github.com/stretchr/testify/require"
-    "io/ioutil"
+    "os"
     "regexp"
     "strings"
-
-    corev1 "k8s.io/api/core/v1"
     "testing"
     "time"
+
+    "github.com/Masterminds/semver/v3"
+    "github.com/gruntwork-io/terratest/modules/helm"
+    "github.com/gruntwork-io/terratest/modules/k8s"
+    "github.com/stretchr/testify/require"
+    corev1 "k8s.io/api/core/v1"
+
+    "github.com/nuodb/nuodb-helm-charts/v3/test/testlib"
 )

 func verifyKillAndInfoInLog(t *testing.T, namespaceName string, adminPodName string, podName string) {
@@ -39,7 +41,7 @@ func verifyKillAndInfoInLog(t *testing.T, namespaceName string, adminPodName str
     testlib.AwaitDatabaseUp(t, namespaceName, adminPodName, "demo", 2)

     oldPodLogFileName := <-ch
-    buf, err := ioutil.ReadFile(oldPodLogFileName)
+    buf, err := os.ReadFile(oldPodLogFileName)
     require.NoError(t, err)
     fullLog := string(buf)
     require.Greater(t, strings.Count(fullLog, "Core was generated by"), 0, "Could not find core parsing in log file, full log: %s", fullLog)
@@ -59,7 +61,6 @@ func verifyKillAndInfoInLog(t *testing.T, namespaceName string, adminPodName str
 }

 func TestKubernetesPrintCores(t *testing.T) {
-    testlib.AwaitTillerUp(t)
     defer testlib.VerifyTeardown(t)

     defer testlib.Teardown(testlib.TEARDOWN_ADMIN)
@@ -114,7 +115,6 @@ func TestKubernetesPrintCores(t *testing.T) {

 func TestPermanentLossOfAdmin(t *testing.T) {
     t.Skip("pod killing is flaky!")
-    testlib.AwaitTillerUp(t)
     defer testlib.VerifyTeardown(t)

     defer testlib.Teardown(testlib.TEARDOWN_ADMIN)
@@ -182,7 +182,6 @@ func GetAdminOfEnginePodE(t *testing.T, namespaceName string, admin0 string, pod
 }

 func TestReadWriteManyEnabledManyEngines(t *testing.T) {
-    testlib.AwaitTillerUp(t)
     defer testlib.VerifyTeardown(t)

     defer testlib.Teardown(testlib.TEARDOWN_ADMIN)
@@ -206,7 +205,6 @@ func TestReadWriteManyEnabledManyEngines(t *testing.T) {
 }

 func TestReadWriteManyMultitenancy(t *testing.T) {
-    testlib.AwaitTillerUp(t)
     defer testlib.VerifyTeardown(t)

     defer testlib.Teardown(testlib.TEARDOWN_ADMIN)
@@ -241,7 +239,6 @@ func TestReadWriteManyMultitenancy(t *testing.T) {
 }

 func TestNuoDBKubeDiagnostics(t *testing.T) {
-    testlib.AwaitTillerUp(t)
     defer testlib.VerifyTeardown(t)

     defer testlib.Teardown(testlib.TEARDOWN_ADMIN)
@@ -275,7 +272,6 @@ func TestNuoDBKubeDiagnostics(t *testing.T) {

 func TestGetDiagnoseInfoAdminLogPersistence(t *testing.T) {
     // start an admin with log persistence enabled
-    testlib.AwaitTillerUp(t)
     defer testlib.VerifyTeardown(t)

     defer testlib.Teardown(testlib.TEARDOWN_ADMIN)
diff --git a/test/minikube/minikube_domain_resync_test.go b/test/minikube/minikube_domain_resync_test.go
index a7372827e..45fe7cf95 100644
--- a/test/minikube/minikube_domain_resync_test.go
+++ b/test/minikube/minikube_domain_resync_test.go
@@ -15,14 +15,12 @@ import (
     "testing"
     "time"

-    v12 "k8s.io/api/core/v1"
-
+    "github.com/gruntwork-io/terratest/modules/helm"
+    "github.com/gruntwork-io/terratest/modules/k8s"
     "github.com/stretchr/testify/require"
+    corev1 "k8s.io/api/core/v1"

     "github.com/nuodb/nuodb-helm-charts/v3/test/testlib"
-
-    "github.com/gruntwork-io/terratest/modules/helm"
-    "github.com/gruntwork-io/terratest/modules/k8s"
 )

 func verifyProcessLabels(t *testing.T, namespaceName string, adminPod string) (archiveVolumeClaims map[string]int) {
@@ -86,7 +84,6
@@ func checkInitialMembership(t require.TestingT, configJson string, expectedSize } func TestReprovisionAdmin0(t *testing.T) { - testlib.AwaitTillerUp(t) defer testlib.VerifyTeardown(t) defer testlib.Teardown(testlib.TEARDOWN_ADMIN) @@ -102,7 +99,7 @@ func TestReprovisionAdmin0(t *testing.T) { admin1 := adminStatefulSet + "-1" // get OLD logs - go testlib.GetAppLog(t, namespaceName, admin0, "-previous", &v12.PodLogOptions{Follow: true}) + go testlib.GetAppLog(t, namespaceName, admin0, "-previous", &corev1.PodLogOptions{Follow: true}) // check initial membership on admin-0 options := k8s.NewKubectlOptions("", "", namespaceName) @@ -151,7 +148,6 @@ func TestReprovisionAdmin0(t *testing.T) { } func TestAdminScaleDown(t *testing.T) { - testlib.AwaitTillerUp(t) defer testlib.VerifyTeardown(t) defer testlib.Teardown(testlib.TEARDOWN_ADMIN) @@ -166,7 +162,7 @@ func TestAdminScaleDown(t *testing.T) { admin1 := adminStatefulSet + "-1" // get OLD logs - go testlib.GetAppLog(t, namespaceName, admin1, "-previous", &v12.PodLogOptions{Follow: true}) + go testlib.GetAppLog(t, namespaceName, admin1, "-previous", &corev1.PodLogOptions{Follow: true}) // scale down Admin StatefulSet options := k8s.NewKubectlOptions("", "", namespaceName) @@ -221,7 +217,6 @@ func TestDomainResync(t *testing.T) { t.Skip("Cannot test resync without the Enterprise Edition") } - testlib.AwaitTillerUp(t) defer testlib.VerifyTeardown(t) defer testlib.Teardown(testlib.TEARDOWN_ADMIN) @@ -302,7 +297,6 @@ func TestDomainResync(t *testing.T) { } func TestLoadBalancerConfigurationFullResync(t *testing.T) { - testlib.AwaitTillerUp(t) defer testlib.VerifyTeardown(t) defer testlib.Teardown(testlib.TEARDOWN_ADMIN) @@ -338,7 +332,6 @@ func TestLoadBalancerConfigurationFullResync(t *testing.T) { } func TestLoadBalancerConfigurationResync(t *testing.T) { - testlib.AwaitTillerUp(t) defer testlib.VerifyTeardown(t) defer testlib.Teardown(testlib.TEARDOWN_ADMIN) diff --git a/test/minikube/minikube_e2e_demo_test.go b/test/minikube/minikube_e2e_demo_test.go index 8d081db19..f1b19a08b 100644 --- a/test/minikube/minikube_e2e_demo_test.go +++ b/test/minikube/minikube_e2e_demo_test.go @@ -5,16 +5,16 @@ package minikube import ( "fmt" - "github.com/nuodb/nuodb-helm-charts/v3/test/testlib" - v12 "k8s.io/api/core/v1" "testing" "time" "github.com/gruntwork-io/terratest/modules/helm" + corev1 "k8s.io/api/core/v1" + + "github.com/nuodb/nuodb-helm-charts/v3/test/testlib" ) func TestKubernetesYCSB(t *testing.T) { - testlib.AwaitTillerUp(t) defer testlib.VerifyTeardown(t) options := helm.Options{} @@ -42,7 +42,7 @@ func TestKubernetesYCSB(t *testing.T) { testlib.ScaleYCSB(t, namespaceName, 1) ycsbPodName := testlib.GetPodName(t, namespaceName, testlib.YCSB_CONTROLLER_NAME) - go testlib.GetAppLog(t, namespaceName, ycsbPodName, "-ycsb", &v12.PodLogOptions{Follow: true}) + go testlib.GetAppLog(t, namespaceName, ycsbPodName, "-ycsb", &corev1.PodLogOptions{Follow: true}) // let YCSB run for a couple of seconds time.Sleep(5 * time.Second) diff --git a/test/minikube/minikube_external_access_test.go b/test/minikube/minikube_external_access_test.go index 834e0adf2..f820e727e 100644 --- a/test/minikube/minikube_external_access_test.go +++ b/test/minikube/minikube_external_access_test.go @@ -15,15 +15,13 @@ import ( "testing" "time" - "github.com/stretchr/testify/require" - - "github.com/nuodb/nuodb-helm-charts/v3/test/testlib" - - "github.com/Masterminds/semver" + "github.com/Masterminds/semver/v3" "github.com/gruntwork-io/terratest/modules/helm" 
"github.com/gruntwork-io/terratest/modules/random" - + "github.com/stretchr/testify/require" corev1 "k8s.io/api/core/v1" + + "github.com/nuodb/nuodb-helm-charts/v3/test/testlib" ) func findServicePortE(service *corev1.Service, portName string) (*corev1.ServicePort, error) { @@ -225,7 +223,6 @@ func verifyProcessExternalAccessLabels(t *testing.T, namespaceName string, admin func TestKubernetesMultipleTEGroups(t *testing.T) { testlib.SkipTestOnNuoDBVersionCondition(t, "< 4.2.4") - testlib.AwaitTillerUp(t) defer testlib.VerifyTeardown(t) deployMultipleTEGroups := func(t *testing.T, serviceType corev1.ServiceType) { @@ -328,7 +325,6 @@ func TestKubernetesMultipleTEGroups(t *testing.T) { func TestKubernetesIngress(t *testing.T) { // this requires support for "external-address" and "external-port" labels testlib.SkipTestOnNuoDBVersionCondition(t, "< 4.2.3") - testlib.AwaitTillerUp(t) defer testlib.VerifyTeardown(t) randomSuffix := strings.ToLower(random.UniqueId()) diff --git a/test/minikube/minikube_fsfreeze_test.go b/test/minikube/minikube_fsfreeze_test.go index b300e3143..f1c58c5a9 100644 --- a/test/minikube/minikube_fsfreeze_test.go +++ b/test/minikube/minikube_fsfreeze_test.go @@ -10,14 +10,14 @@ import ( "testing" "time" - "github.com/nuodb/nuodb-helm-charts/v3/test/testlib" + "github.com/gruntwork-io/terratest/modules/helm" + "github.com/gruntwork-io/terratest/modules/k8s" + shellquote "github.com/kballard/go-shellquote" "github.com/stretchr/testify/require" corev1 "k8s.io/api/core/v1" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" - "github.com/gruntwork-io/terratest/modules/helm" - "github.com/gruntwork-io/terratest/modules/k8s" - shellquote "github.com/kballard/go-shellquote" + "github.com/nuodb/nuodb-helm-charts/v3/test/testlib" ) func isMinikube(t *testing.T) bool { @@ -133,7 +133,6 @@ func TestFsFreezeBackupHook(t *testing.T) { t.Skip("Can only run test on Minikube or Docker Desktop") } - testlib.AwaitTillerUp(t) defer testlib.VerifyTeardown(t) // Prepare CSI driver to enable fsfreeze @@ -251,7 +250,6 @@ func TestFsFreezeBackupHook(t *testing.T) { } func TestHotSnapBackupHook(t *testing.T) { - testlib.AwaitTillerUp(t) defer testlib.VerifyTeardown(t) options := &helm.Options{ diff --git a/test/minikube/minikube_kaa_additions_test.go b/test/minikube/minikube_kaa_additions_test.go index 14d1b779b..6b1563e40 100644 --- a/test/minikube/minikube_kaa_additions_test.go +++ b/test/minikube/minikube_kaa_additions_test.go @@ -5,44 +5,40 @@ package minikube import ( "fmt" - "io/ioutil" + "os" "path/filepath" "testing" "time" - v12 "k8s.io/api/core/v1" - - "github.com/stretchr/testify/require" - - "github.com/nuodb/nuodb-helm-charts/v3/test/testlib" - "github.com/ghodss/yaml" - "github.com/gruntwork-io/terratest/modules/helm" "github.com/gruntwork-io/terratest/modules/k8s" + "github.com/stretchr/testify/require" + corev1 "k8s.io/api/core/v1" rbacv1 "k8s.io/api/rbac/v1" + + "github.com/nuodb/nuodb-helm-charts/v3/test/testlib" ) func modifyKubeInspectorRoleInPlace(t *testing.T, modificationFunc func(role *rbacv1.Role)) { inspectorRoleFile := filepath.Join(testlib.ADMIN_HELM_CHART_PATH, "templates", "role.yaml") - originalData, err := ioutil.ReadFile(inspectorRoleFile) + originalData, err := os.ReadFile(inspectorRoleFile) require.NoError(t, err) - testlib.AddTeardown(testlib.TEARDOWN_ADMIN, func() { ioutil.WriteFile(inspectorRoleFile, originalData, 0644) }) + testlib.AddTeardown(testlib.TEARDOWN_ADMIN, func() { os.WriteFile(inspectorRoleFile, originalData, 0644) }) output := helm.RenderTemplate(t, 
&helm.Options{}, testlib.ADMIN_HELM_CHART_PATH, "release-name", []string{"templates/role.yaml"}) roles := testlib.SplitAndRenderRole(t, output, 1) modificationFunc(&roles[0]) roleBytes, err := yaml.Marshal(&roles[0]) require.NoError(t, err) - err = ioutil.WriteFile(inspectorRoleFile, roleBytes, 0644) + err = os.WriteFile(inspectorRoleFile, roleBytes, 0644) require.NoError(t, err) - out, _ := ioutil.ReadFile(inspectorRoleFile) + out, _ := os.ReadFile(inspectorRoleFile) t.Log("Modified roles file:\n" + string(out)) } func TestKaaLimitedPermissions(t *testing.T) { // This test requires NuoDB 4.1.1+ - testlib.AwaitTillerUp(t) defer testlib.VerifyTeardown(t) defer testlib.Teardown(testlib.TEARDOWN_ADMIN) @@ -72,17 +68,17 @@ func TestKaaLimitedPermissions(t *testing.T) { // Verify that KAA will not register informer for daemonsets testlib.Await(t, func() bool { return testlib.GetStringOccurrenceInLog(t, namespaceName, admin0, - "Informer for resource 'daemonsets' not registered", &v12.PodLogOptions{}) == 1 + "Informer for resource 'daemonsets' not registered", &corev1.PodLogOptions{}) == 1 }, 30*time.Second) // Verify that KAA will not register informer for pods testlib.Await(t, func() bool { return testlib.GetStringOccurrenceInLog(t, namespaceName, admin0, - "Informer for resource 'pods' not registered", &v12.PodLogOptions{}) == 1 + "Informer for resource 'pods' not registered", &corev1.PodLogOptions{}) == 1 }, 30*time.Second) // Verify that KAA will not register informer for PVCs testlib.Await(t, func() bool { return testlib.GetStringOccurrenceInLog(t, namespaceName, admin0, - "Informer for resource 'persistentvolumeclaims' not registered", &v12.PodLogOptions{}) == 1 + "Informer for resource 'persistentvolumeclaims' not registered", &corev1.PodLogOptions{}) == 1 }, 30*time.Second) // Verify that resources that KAA has permissions for are available @@ -96,7 +92,6 @@ func TestKaaLimitedPermissions(t *testing.T) { } func TestKaaRolebindingDisabled(t *testing.T) { - testlib.AwaitTillerUp(t) defer testlib.VerifyTeardown(t) defer testlib.Teardown(testlib.TEARDOWN_ADMIN) @@ -112,7 +107,7 @@ func TestKaaRolebindingDisabled(t *testing.T) { // Verify that KAA won't start due to limited mandatory permissions testlib.Await(t, func() bool { return testlib.GetStringOccurrenceInLog(t, namespaceName, admin0, - "Not registering event listeners: service account unauthorized for resource 'leases'", &v12.PodLogOptions{}) == 1 + "Not registering event listeners: service account unauthorized for resource 'leases'", &corev1.PodLogOptions{}) == 1 }, 30*time.Second) // Verify that no resources are available via KAA @@ -124,7 +119,6 @@ func TestKaaRolebindingDisabled(t *testing.T) { func TestKubernetesTopologyDiscover(t *testing.T) { testlib.SkipTestOnNuoDBVersionCondition(t, "< 6.0.3") - testlib.AwaitTillerUp(t) defer testlib.VerifyTeardown(t) currentRegions := testlib.LabelNodesIfMissing(t, "topology.kubernetes.io/region", "test-region") @@ -172,6 +166,6 @@ func TestKubernetesTopologyDiscover(t *testing.T) { require.Equal(t, currentZones[node], process.Labels["zone"]) require.Equal(t, currentRegions[node], process.Labels["region"]) require.Equal(t, 1, testlib.GetStringOccurrenceInLog(t, namespaceName, process.Hostname, - "Looking for admin with labels matching: node zone region", &v12.PodLogOptions{})) + "Looking for admin with labels matching: node zone region", &corev1.PodLogOptions{})) } } diff --git a/test/minikube/minikube_large_instance_test.go b/test/minikube/minikube_large_instance_test.go index
ddf3c4466..2b519eb71 100644 --- a/test/minikube/minikube_large_instance_test.go +++ b/test/minikube/minikube_large_instance_test.go @@ -1,3 +1,4 @@ +//go:build large // +build large package minikube @@ -7,14 +8,13 @@ import ( "strings" "testing" - "github.com/nuodb/nuodb-helm-charts/v3/test/testlib" - "github.com/gruntwork-io/terratest/modules/helm" "github.com/gruntwork-io/terratest/modules/random" + + "github.com/nuodb/nuodb-helm-charts/v3/test/testlib" ) func TestHashiCorpVault(t *testing.T) { - testlib.AwaitTillerUp(t) defer testlib.VerifyTeardown(t) randomSuffix := strings.ToLower(random.UniqueId()) diff --git a/test/minikube/minikube_long_admin_test.go b/test/minikube/minikube_long_admin_test.go index a7fc2899d..18599ee06 100644 --- a/test/minikube/minikube_long_admin_test.go +++ b/test/minikube/minikube_long_admin_test.go @@ -10,13 +10,13 @@ import ( "github.com/gruntwork-io/terratest/modules/helm" "github.com/gruntwork-io/terratest/modules/k8s" - "github.com/nuodb/nuodb-helm-charts/v3/test/testlib" "github.com/stretchr/testify/require" - v12 "k8s.io/api/core/v1" + corev1 "k8s.io/api/core/v1" + + "github.com/nuodb/nuodb-helm-charts/v3/test/testlib" ) func TestKubernetesBasicAdminThreeReplicas(t *testing.T) { - testlib.AwaitTillerUp(t) defer testlib.VerifyTeardown(t) options := helm.Options{ @@ -66,7 +66,6 @@ func TestDatabaseAdminAffinityLabels(t *testing.T) { t.Skip("Cannot test multiple SMs without the Enterprise Edition") } - testlib.AwaitTillerUp(t) defer testlib.VerifyTeardown(t) defer testlib.Teardown(testlib.TEARDOWN_ADMIN) @@ -115,11 +114,11 @@ func TestDatabaseAdminAffinityLabels(t *testing.T) { require.NoError(t, err) for _, process := range processes { require.Equal(t, 1, testlib.GetStringOccurrenceInLog(t, namespaceName, process.Hostname, - "Looking for admin with labels matching: host zone", &v12.PodLogOptions{})) + "Looking for admin with labels matching: host zone", &corev1.PodLogOptions{})) expectedAffinityLog := fmt.Sprintf("Preferring APs %s due to matching label zone=us-east-1", admin0) require.Equal(t, 1, testlib.GetStringOccurrenceInLog(t, namespaceName, process.Hostname, - expectedAffinityLog, &v12.PodLogOptions{}), "Did not find expected log message %s", expectedAffinityLog) + expectedAffinityLog, &corev1.PodLogOptions{}), "Did not find expected log message %s", expectedAffinityLog) } } diff --git a/test/minikube/minikube_long_journal_migration_test.go b/test/minikube/minikube_long_journal_migration_test.go index 0f23e7604..08064a832 100644 --- a/test/minikube/minikube_long_journal_migration_test.go +++ b/test/minikube/minikube_long_journal_migration_test.go @@ -13,13 +13,13 @@ import ( "github.com/gruntwork-io/terratest/modules/helm" "github.com/gruntwork-io/terratest/modules/k8s" - "github.com/nuodb/nuodb-helm-charts/v3/test/testlib" "github.com/stretchr/testify/require" corev1 "k8s.io/api/core/v1" + + "github.com/nuodb/nuodb-helm-charts/v3/test/testlib" ) func TestChangingJournalLocationFails(t *testing.T) { - testlib.AwaitTillerUp(t) defer testlib.VerifyTeardown(t) options := helm.Options{} @@ -57,7 +57,6 @@ func TestChangingJournalLocationWithMultipleSMs(t *testing.T) { t.Skip("Cannot test autoRestore without the Enterprise Edition") } - testlib.AwaitTillerUp(t) defer testlib.VerifyTeardown(t) options := helm.Options{} diff --git a/test/minikube/minikube_long_restore_test.go b/test/minikube/minikube_long_restore_test.go index 89aac3ec4..5ade4aa8a 100644 --- a/test/minikube/minikube_long_restore_test.go +++ b/test/minikube/minikube_long_restore_test.go @@ 
-11,14 +11,12 @@ import ( "testing" "time" + "github.com/gruntwork-io/terratest/modules/helm" + "github.com/gruntwork-io/terratest/modules/k8s" "github.com/stretchr/testify/require" - - "github.com/nuodb/nuodb-helm-charts/v3/test/testlib" - corev1 "k8s.io/api/core/v1" - "github.com/gruntwork-io/terratest/modules/helm" - "github.com/gruntwork-io/terratest/modules/k8s" + "github.com/nuodb/nuodb-helm-charts/v3/test/testlib" ) func verifyExternalJournal(t *testing.T, namespaceName string, adminPod string, @@ -68,7 +66,6 @@ func TestKubernetesRestoreMultipleSMs(t *testing.T) { if os.Getenv("NUODB_LICENSE") != "ENTERPRISE" && os.Getenv("NUODB_LICENSE_CONTENT") == "" { t.Skip("Cannot test multiple SMs without the Enterprise Edition") } - testlib.AwaitTillerUp(t) defer testlib.VerifyTeardown(t) defer testlib.Teardown(testlib.TEARDOWN_ADMIN) @@ -193,7 +190,6 @@ func TestKubernetesRestoreMultipleBackupGroups(t *testing.T) { if os.Getenv("NUODB_LICENSE") != "ENTERPRISE" && os.Getenv("NUODB_LICENSE_CONTENT") == "" { t.Skip("Cannot test multiple SMs without the Enterprise Edition") } - testlib.AwaitTillerUp(t) defer testlib.VerifyTeardown(t) defer testlib.Teardown(testlib.TEARDOWN_ADMIN) @@ -333,7 +329,6 @@ func TestKubernetesRestoreCustomBackupGroups(t *testing.T) { if os.Getenv("NUODB_LICENSE") != "ENTERPRISE" && os.Getenv("NUODB_LICENSE_CONTENT") == "" { t.Skip("Cannot test multiple SMs without the Enterprise Edition") } - testlib.AwaitTillerUp(t) defer testlib.VerifyTeardown(t) defer testlib.Teardown(testlib.TEARDOWN_ADMIN) @@ -466,7 +461,6 @@ func TestKubernetesRestoreWithStorageGroups(t *testing.T) { t.Skip("Cannot test multiple SMs without the Enterprise Edition") } testlib.SkipTestOnNuoDBVersionCondition(t, "< 5.0.3") - testlib.AwaitTillerUp(t) defer testlib.VerifyTeardown(t) defer testlib.Teardown(testlib.TEARDOWN_ADMIN) @@ -595,7 +589,6 @@ func TestKubernetesImportWithStorageGroups(t *testing.T) { t.Skip("Cannot test multiple SMs without the Enterprise Edition") } testlib.SkipTestOnNuoDBVersionCondition(t, "< 5.0.3") - testlib.AwaitTillerUp(t) defer testlib.VerifyTeardown(t) defer testlib.Teardown(testlib.TEARDOWN_ADMIN) @@ -691,7 +684,6 @@ func TestKubernetesImportWithStorageGroups(t *testing.T) { } func TestKubernetesRestoreDatabaseWithURL(t *testing.T) { - testlib.AwaitTillerUp(t) defer testlib.VerifyTeardown(t) options := helm.Options{ @@ -760,7 +752,6 @@ func TestKubernetesRestoreDatabaseWithURL(t *testing.T) { } func TestKubernetesImportDatabaseSeparateJournal(t *testing.T) { - testlib.AwaitTillerUp(t) defer testlib.VerifyTeardown(t) defer testlib.Teardown(testlib.TEARDOWN_ADMIN) @@ -814,7 +805,6 @@ func TestKubernetesImportDatabaseSeparateJournal(t *testing.T) { } func TestKubernetesRestoreDatabaseSeparateJournal(t *testing.T) { - testlib.AwaitTillerUp(t) defer testlib.VerifyTeardown(t) defer testlib.Teardown(testlib.TEARDOWN_ADMIN) @@ -895,7 +885,6 @@ func TestKubernetesRestoreDatabaseSeparateJournal(t *testing.T) { // Extensive end to end tests for creating a database off of a VolumeSnapshot func TestCornerCaseKubernetesSnapshotRestore(t *testing.T) { // Set up domain - testlib.AwaitTillerUp(t) defer testlib.VerifyTeardown(t) defer testlib.Teardown(testlib.TEARDOWN_ADMIN) diff --git a/test/minikube/minikube_multitenant_test.go b/test/minikube/minikube_multitenant_test.go index 700427a18..554346229 100644 --- a/test/minikube/minikube_multitenant_test.go +++ b/test/minikube/minikube_multitenant_test.go @@ -8,27 +8,23 @@ import ( "strings" "testing" - 
"github.com/nuodb/nuodb-helm-charts/v3/test/testlib" - "github.com/stretchr/testify/require" - - "github.com/Masterminds/semver" + "github.com/Masterminds/semver/v3" "github.com/gruntwork-io/terratest/modules/helm" "github.com/gruntwork-io/terratest/modules/k8s" "github.com/gruntwork-io/terratest/modules/random" + "github.com/stretchr/testify/require" + + "github.com/nuodb/nuodb-helm-charts/v3/test/testlib" ) func TestKubernetesMultiTenantDatabase(t *testing.T) { - testlib.AwaitTillerUp(t) defer testlib.VerifyTeardown(t) - - options := helm.Options{} - defer testlib.Teardown(testlib.TEARDOWN_ADMIN) + options := helm.Options{} helmChartReleaseName, namespaceName := testlib.StartAdmin(t, &options, 1, "") admin0 := fmt.Sprintf("%s-nuodb-cluster0-0", helmChartReleaseName) - defer testlib.Teardown(testlib.TEARDOWN_DATABASE) testlib.AddDiagnosticTeardown(testlib.TEARDOWN_DATABASE, t, func() { @@ -57,7 +53,6 @@ func TestKubernetesMultiTenantDatabase(t *testing.T) { } func TestKubernetesNamespaceCoexistence(t *testing.T) { - testlib.AwaitTillerUp(t) defer testlib.VerifyTeardown(t) defer testlib.Teardown(testlib.TEARDOWN_ADMIN) defer testlib.Teardown(testlib.TEARDOWN_DATABASE) diff --git a/test/minikube/minikube_priority_class_test.go b/test/minikube/minikube_priority_class_test.go index 3a663979c..a83d4441e 100644 --- a/test/minikube/minikube_priority_class_test.go +++ b/test/minikube/minikube_priority_class_test.go @@ -10,19 +10,17 @@ import ( "testing" "time" + "github.com/gruntwork-io/terratest/modules/helm" + "github.com/gruntwork-io/terratest/modules/k8s" + "github.com/gruntwork-io/terratest/modules/random" + "github.com/stretchr/testify/require" schedulingv1 "k8s.io/api/scheduling/v1" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "github.com/nuodb/nuodb-helm-charts/v3/test/testlib" - "github.com/stretchr/testify/require" - - "github.com/gruntwork-io/terratest/modules/helm" - "github.com/gruntwork-io/terratest/modules/k8s" - "github.com/gruntwork-io/terratest/modules/random" ) func TestPriorityClassNonexistent(t *testing.T) { - testlib.AwaitTillerUp(t) defer testlib.VerifyTeardown(t) randomSuffix := strings.ToLower(random.UniqueId()) @@ -75,7 +73,6 @@ func TestPriorityClassNonexistent(t *testing.T) { } func TestPriorityClass(t *testing.T) { - testlib.AwaitTillerUp(t) defer testlib.VerifyTeardown(t) randomSuffix := strings.ToLower(random.UniqueId()) diff --git a/test/minikube/minikube_rolling_upgrade_test.go b/test/minikube/minikube_rolling_upgrade_test.go index 59c581112..7be71d201 100644 --- a/test/minikube/minikube_rolling_upgrade_test.go +++ b/test/minikube/minikube_rolling_upgrade_test.go @@ -11,17 +11,14 @@ import ( "testing" "time" - corev1 "k8s.io/api/core/v1" - - "github.com/nuodb/nuodb-helm-charts/v3/test/testlib" - - "github.com/Masterminds/semver" + "github.com/Masterminds/semver/v3" "github.com/gruntwork-io/terratest/modules/helm" "github.com/gruntwork-io/terratest/modules/k8s" "github.com/gruntwork-io/terratest/modules/random" "github.com/stretchr/testify/require" + corev1 "k8s.io/api/core/v1" - v12 "k8s.io/api/core/v1" + "github.com/nuodb/nuodb-helm-charts/v3/test/testlib" ) const OLD_RELEASE = "5.0" @@ -38,7 +35,6 @@ func verifyAllProcessesRunning(t *testing.T, namespaceName string, adminPod stri } func TestAdminProbes(t *testing.T) { - testlib.AwaitTillerUp(t) defer testlib.VerifyTeardown(t) defer testlib.Teardown(testlib.TEARDOWN_ADMIN) @@ -121,7 +117,6 @@ func TestAdminProbes(t *testing.T) { } func TestKubernetesUpgradeAdminMinorVersion(t *testing.T) { - 
testlib.AwaitTillerUp(t) defer testlib.VerifyTeardown(t) options := helm.Options{ @@ -140,7 +135,7 @@ func TestKubernetesUpgradeAdminMinorVersion(t *testing.T) { admin0 := fmt.Sprintf("%s-nuodb-cluster0-0", helmChartReleaseName) // get the OLD log - go testlib.GetAppLog(t, namespaceName, admin0, "-previous", &v12.PodLogOptions{Follow: true}) + go testlib.GetAppLog(t, namespaceName, admin0, "-previous", &corev1.PodLogOptions{Follow: true}) expectedNewVersion := testlib.GetUpgradedReleaseVersion(t, &options) @@ -153,7 +148,6 @@ func TestKubernetesUpgradeAdminMinorVersion(t *testing.T) { } func TestKubernetesUpgradeFullDatabase(t *testing.T) { - testlib.AwaitTillerUp(t) defer testlib.VerifyTeardown(t) options := helm.Options{ @@ -187,7 +181,7 @@ func TestKubernetesUpgradeFullDatabase(t *testing.T) { admin0 := fmt.Sprintf("%s-nuodb-cluster0-0", adminHelmChartReleaseName) // get the OLD log - go testlib.GetAppLog(t, namespaceName, admin0, "-previous", &v12.PodLogOptions{Follow: true}) + go testlib.GetAppLog(t, namespaceName, admin0, "-previous", &corev1.PodLogOptions{Follow: true}) defer testlib.Teardown(testlib.TEARDOWN_DATABASE) // ensure resources allocated in called functions are released when this function exits @@ -208,7 +202,7 @@ func TestKubernetesUpgradeFullDatabase(t *testing.T) { expectedNumberReconnects := 2 testlib.Await(t, func() bool { - return testlib.GetStringOccurrenceInLog(t, namespaceName, admin0, "Reconnected with process with connectKey", &v12.PodLogOptions{}) == expectedNumberReconnects + return testlib.GetStringOccurrenceInLog(t, namespaceName, admin0, "Reconnected with process with connectKey", &corev1.PodLogOptions{}) == expectedNumberReconnects }, 30*time.Second) }) @@ -338,7 +332,6 @@ func TestKubernetesUpgradeFullDatabase(t *testing.T) { func TestKubernetesRollingUpgradeAdminMinorVersion(t *testing.T) { t.Skip("4.0.7+ Admin is not rolling upgradeable from pre-4.0.7") - testlib.AwaitTillerUp(t) defer testlib.VerifyTeardown(t) options := helm.Options{ @@ -370,9 +363,9 @@ func TestKubernetesRollingUpgradeAdminMinorVersion(t *testing.T) { admin1 := fmt.Sprintf("%s-nuodb-cluster0-1", helmChartReleaseName) admin2 := fmt.Sprintf("%s-nuodb-cluster0-2", helmChartReleaseName) - go testlib.GetAppLog(t, namespaceName, admin0, "-previous", &v12.PodLogOptions{Follow: true}) - go testlib.GetAppLog(t, namespaceName, admin1, "-previous", &v12.PodLogOptions{Follow: true}) - go testlib.GetAppLog(t, namespaceName, admin2, "-previous", &v12.PodLogOptions{Follow: true}) + go testlib.GetAppLog(t, namespaceName, admin0, "-previous", &corev1.PodLogOptions{Follow: true}) + go testlib.GetAppLog(t, namespaceName, admin1, "-previous", &corev1.PodLogOptions{Follow: true}) + go testlib.GetAppLog(t, namespaceName, admin2, "-previous", &corev1.PodLogOptions{Follow: true}) testlib.AwaitBalancerTerminated(t, namespaceName, "job-lb-policy") diff --git a/test/minikube/minikube_security_context_test.go b/test/minikube/minikube_security_context_test.go index c3b6ed65a..6b0a1b975 100644 --- a/test/minikube/minikube_security_context_test.go +++ b/test/minikube/minikube_security_context_test.go @@ -1,3 +1,4 @@ +//go:build long // +build long package minikube @@ -8,13 +9,12 @@ import ( "strings" "testing" - "github.com/stretchr/testify/require" - - "github.com/nuodb/nuodb-helm-charts/v3/test/testlib" - "github.com/gruntwork-io/terratest/modules/helm" "github.com/gruntwork-io/terratest/modules/k8s" "github.com/gruntwork-io/terratest/modules/random" + "github.com/stretchr/testify/require" + + 
"github.com/nuodb/nuodb-helm-charts/v3/test/testlib" ) func checkUser(t *testing.T, namespaceName string, podName string, container string, expectedUid int, expectedGid int, expectedSupplementaryGid int) { @@ -58,7 +58,6 @@ func securityContextTest(t *testing.T, adminUid int, adminGid int, adminFsGroup // owner gid for volumes; this test specifically checks secrets because // the hostpath storage-class used by Minikube does not support fsGroup - testlib.AwaitTillerUp(t) defer testlib.VerifyTeardown(t) randomSuffix := strings.ToLower(random.UniqueId()) @@ -111,7 +110,7 @@ func securityContextTest(t *testing.T, adminUid int, adminGid int, adminFsGroup defer testlib.Teardown(testlib.TEARDOWN_ADMIN) helmChartReleaseName, _ := testlib.StartAdmin(t, &options, 1, namespaceName) adminStatefulSet := fmt.Sprintf("%s-nuodb-cluster0", helmChartReleaseName) - admin0 := adminStatefulSet+"-0" + admin0 := adminStatefulSet + "-0" defer testlib.Teardown(testlib.TEARDOWN_DATABASE) databaseReleaseName := testlib.StartDatabase(t, namespaceName, admin0, &options) @@ -141,7 +140,7 @@ func TestSecurityContextEnabled(t *testing.T) { databaseUid := 5678 databaseGid := 0 databaseFsGroup := 4000 - optionOverrides := map[string]string { + optionOverrides := map[string]string{ "admin.securityContext.enabled": "true", "database.securityContext.enabled": "true", } @@ -159,7 +158,7 @@ func TestSecurityContextRunAsNonRootGroup(t *testing.T) { databaseUid := 1000 databaseGid := 1000 databaseFsGroup := 1000 - optionOverrides := map[string]string { + optionOverrides := map[string]string{ "admin.securityContext.runAsNonRootGroup": "true", "database.securityContext.runAsNonRootGroup": "true", } @@ -175,7 +174,7 @@ func TestSecurityContextFsGroupOnly(t *testing.T) { databaseUid := 1000 databaseGid := 0 databaseFsGroup := 4000 - optionOverrides := map[string]string { + optionOverrides := map[string]string{ "admin.securityContext.fsGroupOnly": "true", "database.securityContext.fsGroupOnly": "true", } diff --git a/test/minikube/minikube_test_test.go b/test/minikube/minikube_test_test.go index 07b51db46..0074c4e10 100644 --- a/test/minikube/minikube_test_test.go +++ b/test/minikube/minikube_test_test.go @@ -1,3 +1,4 @@ +//go:build short // +build short package minikube @@ -6,9 +7,8 @@ import ( "testing" "time" - "github.com/stretchr/testify/require" - "github.com/gruntwork-io/terratest/modules/helm" + "github.com/stretchr/testify/require" "github.com/nuodb/nuodb-helm-charts/v3/test/testlib" ) diff --git a/test/minikube/minikube_tls_admin_test.go b/test/minikube/minikube_tls_admin_test.go index bf870240e..0d8433a8b 100644 --- a/test/minikube/minikube_tls_admin_test.go +++ b/test/minikube/minikube_tls_admin_test.go @@ -1,20 +1,21 @@ +//go:build long // +build long package minikube import ( "fmt" - "github.com/stretchr/testify/require" - v12 "k8s.io/api/core/v1" "path/filepath" "strings" "testing" - "github.com/nuodb/nuodb-helm-charts/v3/test/testlib" - "github.com/gruntwork-io/terratest/modules/helm" "github.com/gruntwork-io/terratest/modules/k8s" "github.com/gruntwork-io/terratest/modules/random" + "github.com/stretchr/testify/require" + corev1 "k8s.io/api/core/v1" + + "github.com/nuodb/nuodb-helm-charts/v3/test/testlib" ) const ENGINE_CERTIFICATE_LOG_TEMPLATE = `Engine Certificate: Certificate #%d CN %s` @@ -34,7 +35,6 @@ func verifyKeystore(t *testing.T, namespace string, podName string, keystore str } func TestKubernetesTLS(t *testing.T) { - testlib.AwaitTillerUp(t) defer testlib.VerifyTeardown(t) randomSuffix := 
strings.ToLower(random.UniqueId()) @@ -97,7 +97,7 @@ func TestKubernetesTLS(t *testing.T) { tePodNameTemplate := fmt.Sprintf("te-%s", databaseReleaseName) tePodName := testlib.GetPodName(t, namespaceName, tePodNameTemplate) - go testlib.GetAppLog(t, namespaceName, tePodName, "", &v12.PodLogOptions{Follow: true}) + go testlib.GetAppLog(t, namespaceName, tePodName, "", &corev1.PodLogOptions{Follow: true}) // TE certificate is signed by the admin and the DN entry is the pod name // this is the 4th pod name because: #0 and #1 are trusted certs, #2 is CA, #3 is admin, #4 is engine @@ -122,7 +122,7 @@ func TestKubernetesTLS(t *testing.T) { tePodNameTemplate := fmt.Sprintf("te-%s", databaseReleaseName) tePodName := testlib.GetPodName(t, namespaceName, tePodNameTemplate) - go testlib.GetAppLog(t, namespaceName, tePodName, "", &v12.PodLogOptions{Follow: true}) + go testlib.GetAppLog(t, namespaceName, tePodName, "", &corev1.PodLogOptions{Follow: true}) // TE certificate is not signed by the admin and the DN entry is the generic admin name // this is the 3rd pod name because: #0 and #1 are trusted certs, #2 is CA, #3 is admin (and engine) diff --git a/test/minikube/minikube_tls_rotation_test.go b/test/minikube/minikube_tls_rotation_test.go index 8aa799364..0d5c0bfbe 100644 --- a/test/minikube/minikube_tls_rotation_test.go +++ b/test/minikube/minikube_tls_rotation_test.go @@ -10,15 +10,13 @@ import ( "strings" "testing" - v1 "k8s.io/api/core/v1" - - "github.com/stretchr/testify/require" - - "github.com/nuodb/nuodb-helm-charts/v3/test/testlib" - "github.com/gruntwork-io/terratest/modules/helm" "github.com/gruntwork-io/terratest/modules/k8s" "github.com/gruntwork-io/terratest/modules/random" + "github.com/stretchr/testify/require" + corev1 "k8s.io/api/core/v1" + + "github.com/nuodb/nuodb-helm-charts/v3/test/testlib" ) func verifyAdminCertificates(t *testing.T, info testlib.NuoDBCertificateInfo, expectedDN string) { @@ -52,7 +50,6 @@ func startDomainWithTLSCertificates(t *testing.T, options *helm.Options, namespa } func TestKubernetesTLSRotation(t *testing.T) { - testlib.AwaitTillerUp(t) defer testlib.VerifyTeardown(t) randomSuffix := strings.ToLower(random.UniqueId()) @@ -117,8 +114,8 @@ func TestKubernetesTLSRotation(t *testing.T) { admin1 := fmt.Sprintf("%s-nuodb-cluster0-1", adminReleaseName) // get the OLD log - go testlib.GetAppLog(t, namespaceName, admin0, "-previous", &v1.PodLogOptions{Follow: true}) - go testlib.GetAppLog(t, namespaceName, admin1, "-previous", &v1.PodLogOptions{Follow: true}) + go testlib.GetAppLog(t, namespaceName, admin0, "-previous", &corev1.PodLogOptions{Follow: true}) + go testlib.GetAppLog(t, namespaceName, admin1, "-previous", &corev1.PodLogOptions{Follow: true}) // create the new certs... 
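// The GetAppLog calls above save a pod's log under a suffixed file name (e.g.
// "-previous") while streaming it in the background with Follow: true. A
// minimal sketch of the client-go call a helper like this might wrap, assuming
// a kubernetes.Interface client; the function name openPodLogStream is
// illustrative, not part of this patch.
package testlib

import (
	"context"
	"io"

	corev1 "k8s.io/api/core/v1"
	"k8s.io/client-go/kubernetes"
)

// openPodLogStream returns a reader over one pod's log, honoring whatever
// PodLogOptions the caller supplies (Container, Follow, Previous, ...).
func openPodLogStream(clientset kubernetes.Interface, namespace string, podName string,
	opts *corev1.PodLogOptions) (io.ReadCloser, error) {
	req := clientset.CoreV1().Pods(namespace).GetLogs(podName, opts)
	return req.Stream(context.Background())
}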
testlib.GenerateCustomCertificates(t, certGeneratorPodName, namespaceName, newTLSCommands) diff --git a/test/minikube/minikube_upgrade_helm_test.go b/test/minikube/minikube_upgrade_helm_test.go index 2e44a09ad..e6e0c7d6e 100644 --- a/test/minikube/minikube_upgrade_helm_test.go +++ b/test/minikube/minikube_upgrade_helm_test.go @@ -13,12 +13,12 @@ import ( "github.com/gruntwork-io/terratest/modules/helm" "github.com/gruntwork-io/terratest/modules/k8s" "github.com/gruntwork-io/terratest/modules/random" + corev1 "k8s.io/api/core/v1" + "github.com/nuodb/nuodb-helm-charts/v3/test/testlib" - v12 "k8s.io/api/core/v1" ) func upgradeAdminTest(t *testing.T, fromHelmVersion string, upgradeOptions *testlib.UpgradeOptions) { - testlib.AwaitTillerUp(t) defer testlib.VerifyTeardown(t) options := &helm.Options{ @@ -45,7 +45,7 @@ func upgradeAdminTest(t *testing.T, fromHelmVersion string, upgradeOptions *test admin0 := fmt.Sprintf("%s-nuodb-cluster0-0", helmChartReleaseName) // get the OLD log - go testlib.GetAppLog(t, namespaceName, admin0, "-previous", &v12.PodLogOptions{Follow: true}) + go testlib.GetAppLog(t, namespaceName, admin0, "-previous", &corev1.PodLogOptions{Follow: true}) adminPod := testlib.GetPod(t, namespaceName, admin0) @@ -64,7 +64,6 @@ func upgradeAdminTest(t *testing.T, fromHelmVersion string, upgradeOptions *test } func upgradeDatabaseTest(t *testing.T, fromHelmVersion string, enableCron bool, upgradeOptions *testlib.UpgradeOptions) { - testlib.AwaitTillerUp(t) defer testlib.VerifyTeardown(t) options := &helm.Options{ @@ -97,7 +96,7 @@ func upgradeDatabaseTest(t *testing.T, fromHelmVersion string, enableCron bool, admin0 := fmt.Sprintf("%s-nuodb-cluster0-0", helmChartReleaseName) // get the OLD log - go testlib.GetAppLog(t, namespaceName, admin0, "-previous", &v12.PodLogOptions{Follow: true}) + go testlib.GetAppLog(t, namespaceName, admin0, "-previous", &corev1.PodLogOptions{Follow: true}) defer testlib.Teardown(testlib.TEARDOWN_DATABASE) databaseReleaseName := testlib.StartDatabase(t, namespaceName, admin0, options) @@ -123,7 +122,7 @@ func upgradeDatabaseTest(t *testing.T, fromHelmVersion string, enableCron bool, err := testlib.AwaitE(t, func() bool { return testlib.GetStringOccurrenceInLog(t, namespaceName, admin0, "Reconnected with process with connectKey", - &v12.PodLogOptions{ + &corev1.PodLogOptions{ Container: "admin", }) == 2 }, 120*time.Second) @@ -142,8 +141,8 @@ func upgradeDatabaseTest(t *testing.T, fromHelmVersion string, enableCron bool, tePodName := testlib.GetPodName(t, namespaceName, tePodNameTemplate) smPodName := testlib.GetPodName(t, namespaceName, smPodNameTemplate) - go testlib.GetAppLog(t, namespaceName, tePodName, "-pre-kill", &v12.PodLogOptions{Follow: true}) - go testlib.GetAppLog(t, namespaceName, smPodName, "-pre-kill", &v12.PodLogOptions{Follow: true}) + go testlib.GetAppLog(t, namespaceName, tePodName, "-pre-kill", &corev1.PodLogOptions{Follow: true}) + go testlib.GetAppLog(t, namespaceName, smPodName, "-pre-kill", &corev1.PodLogOptions{Follow: true}) testlib.KillProcess(t, namespaceName, tePodName) testlib.KillProcess(t, namespaceName, smPodName) @@ -160,52 +159,30 @@ func upgradeDatabaseTest(t *testing.T, fromHelmVersion string, enableCron bool, func TestUpgradeHelm(t *testing.T) { - t.Run("NuoDB_From330_ToLocal", func(t *testing.T) { - upgradeAdminTest(t, "3.3.0", &testlib.UpgradeOptions{ - AdminPodShouldGetRecreated: true, - }) - }) - - t.Run("NuoDB_From340_ToLocal", func(t *testing.T) { - upgradeAdminTest(t, "3.4.0", &testlib.UpgradeOptions{ - 
AdminPodShouldGetRecreated: true, - }) + t.Run("NuoDB_From370_ToLocal", func(t *testing.T) { + upgradeAdminTest(t, "3.7.0", &testlib.UpgradeOptions{}) }) - t.Run("NuoDB_From350_ToLocal", func(t *testing.T) { - upgradeAdminTest(t, "3.5.0", &testlib.UpgradeOptions{ - AdminPodShouldGetRecreated: true, - }) + t.Run("NuoDB_From382_ToLocal", func(t *testing.T) { + upgradeAdminTest(t, "3.8.2", &testlib.UpgradeOptions{}) }) - t.Run("NuoDB_From360_ToLocal", func(t *testing.T) { - upgradeAdminTest(t, "3.6.0", &testlib.UpgradeOptions{}) + t.Run("NuoDB_From390_ToLocal", func(t *testing.T) { + upgradeAdminTest(t, "3.9.0", &testlib.UpgradeOptions{}) }) } func TestUpgradeHelmFullDB(t *testing.T) { - t.Run("NuoDB_From330_ToLocal", func(t *testing.T) { - upgradeDatabaseTest(t, "3.3.0", false, &testlib.UpgradeOptions{ - AdminPodShouldGetRecreated: true, - }) - }) - - t.Run("NuoDB_From340_ToLocal", func(t *testing.T) { - upgradeDatabaseTest(t, "3.4.0", false, &testlib.UpgradeOptions{ - AdminPodShouldGetRecreated: true, - }) + t.Run("NuoDB_From370_ToLocal", func(t *testing.T) { + upgradeDatabaseTest(t, "3.7.0", false, &testlib.UpgradeOptions{}) }) - t.Run("NuoDB_From350_ToLocal", func(t *testing.T) { - upgradeDatabaseTest(t, "3.5.0", true, &testlib.UpgradeOptions{ - AdminPodShouldGetRecreated: true, - }) + t.Run("NuoDB_From382_ToLocal", func(t *testing.T) { + upgradeDatabaseTest(t, "3.8.2", true, &testlib.UpgradeOptions{}) }) - t.Run("NuoDB_From360_ToLocal", func(t *testing.T) { - upgradeDatabaseTest(t, "3.6.0", true, &testlib.UpgradeOptions{ - AdminPodShouldGetRecreated: true, - }) + t.Run("NuoDB_From390_ToLocal", func(t *testing.T) { + upgradeDatabaseTest(t, "3.9.0", true, &testlib.UpgradeOptions{}) }) } diff --git a/test/minikube/verify_utility.go b/test/minikube/verify_utility.go index 9697c5ab1..ead13e0a5 100644 --- a/test/minikube/verify_utility.go +++ b/test/minikube/verify_utility.go @@ -5,8 +5,9 @@ import ( "testing" "time" - "github.com/nuodb/nuodb-helm-charts/v3/test/testlib" "github.com/stretchr/testify/require" + + "github.com/nuodb/nuodb-helm-charts/v3/test/testlib" ) func verifyAdminService(t *testing.T, namespaceName string, podName string, serviceName string, ping bool) { diff --git a/test/multicluster/minikube_base_multicluster_test.go b/test/multicluster/minikube_base_multicluster_test.go index cfdd34926..337388529 100644 --- a/test/multicluster/minikube_base_multicluster_test.go +++ b/test/multicluster/minikube_base_multicluster_test.go @@ -8,14 +8,13 @@ import ( "testing" "time" - "github.com/Masterminds/semver" - "github.com/stretchr/testify/require" - - "github.com/nuodb/nuodb-helm-charts/v3/test/testlib" - + "github.com/Masterminds/semver/v3" "github.com/gruntwork-io/terratest/modules/helm" "github.com/gruntwork-io/terratest/modules/k8s" "github.com/gruntwork-io/terratest/modules/random" + "github.com/stretchr/testify/require" + + "github.com/nuodb/nuodb-helm-charts/v3/test/testlib" ) func verifyNuoSQL(t *testing.T, namespaceName string, adminPod string, databaseName string) { @@ -31,7 +30,6 @@ func TestKubernetesRemoveOrphanNamespaces(t *testing.T) { // and all methods in our test framework rely on passing a *testing.T // instance. Execute this cleanup task as a separate test case for simplicity // as it's only needed in multi-cluster infrastructure.
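// Every import of github.com/Masterminds/semver in these files moves to the
// /v3 module path; the package is still named semver, so call sites compile
// unchanged. A minimal sketch of the v3 constraint API that version-gating
// helpers such as SkipTestOnNuoDBVersionCondition can be built on; this
// helper body is an assumption for illustration, not code from this patch.
package testlib

import (
	"fmt"

	"github.com/Masterminds/semver/v3"
)

// versionSatisfies reports whether a version like "6.0.2" matches a
// constraint expression like ">= 6.0.0" or "< 4.3".
func versionSatisfies(version string, constraint string) (bool, error) {
	c, err := semver.NewConstraint(constraint)
	if err != nil {
		return false, fmt.Errorf("bad constraint %q: %w", constraint, err)
	}
	v, err := semver.NewVersion(version)
	if err != nil {
		return false, fmt.Errorf("bad version %q: %w", version, err)
	}
	return c.Check(v), nil
}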
- testlib.AwaitTillerUp(t) defer testlib.VerifyTeardown(t) context := context.Background() @@ -58,7 +56,6 @@ func TestKubernetesBasicMultiCluster(t *testing.T) { if os.Getenv("NUODB_LICENSE") != "ENTERPRISE" && os.Getenv("NUODB_LICENSE_CONTENT") == "" { t.Skip("Cannot test multiple SMs without the Enterprise Edition") } - testlib.AwaitTillerUp(t) defer testlib.VerifyTeardown(t) // For multi-cluster deployment to work correctly, there are two prerequisites: diff --git a/test/testlib/NuoDBKubeConfig.go b/test/testlib/NuoDBKubeConfig.go index ed0c3290e..201f95e0c 100644 --- a/test/testlib/NuoDBKubeConfig.go +++ b/test/testlib/NuoDBKubeConfig.go @@ -5,16 +5,16 @@ import ( "io" "strings" - v1 "k8s.io/api/apps/v1" + appsv1 "k8s.io/api/apps/v1" corev1 "k8s.io/api/core/v1" ) type NuoDBKubeConfig struct { - Pods map[string]corev1.Pod `json:"pods"` - Deployments map[string]v1.Deployment `json:"deployments"` - StatefulSets map[string]v1.StatefulSet `json:"statefulsets"` - Volumes map[string]corev1.Volume `json:"volumes"` - DaemonSets map[string]v1.DaemonSet `json:"daemonSets"` + Pods map[string]corev1.Pod `json:"pods"` + Deployments map[string]appsv1.Deployment `json:"deployments"` + StatefulSets map[string]appsv1.StatefulSet `json:"statefulsets"` + Volumes map[string]corev1.Volume `json:"volumes"` + DaemonSets map[string]appsv1.DaemonSet `json:"daemonSets"` } func UnmarshalNuoDBKubeConfig(s string) (err error, kubeConfigs []NuoDBKubeConfig) { diff --git a/test/testlib/NuoDBRegistryEntry.go b/test/testlib/NuoDBRegistryEntry.go index 33471296b..27483d881 100644 --- a/test/testlib/NuoDBRegistryEntry.go +++ b/test/testlib/NuoDBRegistryEntry.go @@ -1,7 +1,7 @@ package testlib import ( - "gopkg.in/yaml.v3" + "github.com/ghodss/yaml" ) type Registry struct { @@ -15,10 +15,8 @@ type Registry struct { } // UnmarshalImageYAML unmarshals a YAML image definition into a Registry -func UnmarshalImageYAML(s string) (err error, registry Registry) { - registry = Registry{} - - err = yaml.Unmarshal([]byte(s), &registry) - - return +func UnmarshalImageYAML(s string) (Registry, error) { + registry := Registry{} + err := yaml.Unmarshal([]byte(s), &registry) + return registry, err } diff --git a/test/testlib/core_recovery.go b/test/testlib/core_recovery.go index 5a20a5f03..80da36dca 100644 --- a/test/testlib/core_recovery.go +++ b/test/testlib/core_recovery.go @@ -2,13 +2,13 @@ package testlib import ( "fmt" - "github.com/gruntwork-io/terratest/modules/k8s" - "github.com/stretchr/testify/require" - "os" "path/filepath" "testing" "time" + + "github.com/gruntwork-io/terratest/modules/k8s" + "github.com/stretchr/testify/require" ) const DEBUG_POD = ` diff --git a/test/testlib/haproxy_utilities.go b/test/testlib/haproxy_utilities.go index e8e788784..4c931f1ae 100644 --- a/test/testlib/haproxy_utilities.go +++ b/test/testlib/haproxy_utilities.go @@ -11,7 +11,7 @@ import ( "github.com/gruntwork-io/terratest/modules/k8s" "github.com/gruntwork-io/terratest/modules/random" "github.com/stretchr/testify/require" - v12 "k8s.io/api/core/v1" + corev1 "k8s.io/api/core/v1" ) func StartHAProxyIngress(t *testing.T, options *helm.Options, namespaceName string) string { @@ -70,7 +70,7 @@ func StartHAProxyIngress(t *testing.T, options *helm.Options, namespaceName stri if _, err := k8s.GetPodE(t, kubectlOptions, haProxyPodName); err != nil { t.Logf("HAProxy pod '%s' is not available and logs can not be retrieved", haProxyPodName) } else { - go GetAppLog(t, namespaceName, haProxyPodName, "", &v12.PodLogOptions{Follow: true}) + go GetAppLog(t,
namespaceName, haProxyPodName, "", &corev1.PodLogOptions{Follow: true}) } } }) diff --git a/test/testlib/hashicorp_utilities.go b/test/testlib/hashicorp_utilities.go index 3836e7e64..525b6914f 100644 --- a/test/testlib/hashicorp_utilities.go +++ b/test/testlib/hashicorp_utilities.go @@ -2,15 +2,16 @@ package testlib import ( "fmt" - "github.com/gruntwork-io/terratest/modules/helm" - "github.com/gruntwork-io/terratest/modules/k8s" - "github.com/gruntwork-io/terratest/modules/random" - "github.com/stretchr/testify/require" - v12 "k8s.io/api/core/v1" "path/filepath" "strings" "testing" "time" + + "github.com/gruntwork-io/terratest/modules/helm" + "github.com/gruntwork-io/terratest/modules/k8s" + "github.com/gruntwork-io/terratest/modules/random" + "github.com/stretchr/testify/require" + corev1 "k8s.io/api/core/v1" ) func StartVault(t *testing.T, options *helm.Options, namespaceName string) string { @@ -48,7 +49,7 @@ func StartVault(t *testing.T, options *helm.Options, namespaceName string) strin if err != nil { t.Logf("Vault pod '%s' is not available and logs can not be retrieved", vaultName) } else { - go GetAppLog(t, namespaceName, vaultName, "", &v12.PodLogOptions{Follow: true}) + go GetAppLog(t, namespaceName, vaultName, "", &corev1.PodLogOptions{Follow: true}) } }) diff --git a/test/testlib/minikube_utilities.go b/test/testlib/minikube_utilities.go index 3d0af655d..e41f358c0 100644 --- a/test/testlib/minikube_utilities.go +++ b/test/testlib/minikube_utilities.go @@ -6,7 +6,6 @@ import ( "errors" "fmt" "io" - "io/ioutil" "os" "path/filepath" "reflect" @@ -20,14 +19,11 @@ import ( "testing" "time" - "github.com/Masterminds/semver" - "github.com/stretchr/testify/require" - + "github.com/Masterminds/semver/v3" "github.com/gruntwork-io/terratest/modules/helm" "github.com/gruntwork-io/terratest/modules/k8s" - - v1 "k8s.io/api/apps/v1" - + "github.com/stretchr/testify/require" + appsv1 "k8s.io/api/apps/v1" batchv1beta1 "k8s.io/api/batch/v1beta1" corev1 "k8s.io/api/core/v1" apierrors "k8s.io/apimachinery/pkg/api/errors" @@ -219,7 +215,7 @@ func InjectOpenShiftOverrides(t *testing.T, options *helm.Options) { } func InjectTestValuesFile(t *testing.T, options *helm.Options) { - dat, err := ioutil.ReadFile(INJECT_VALUES_FILE) + dat, err := os.ReadFile(INJECT_VALUES_FILE) if err != nil { return } @@ -228,7 +224,7 @@ func InjectTestValuesFile(t *testing.T, options *helm.Options) { } func InjectTestVersion(t *testing.T, options *helm.Options) { - dat, err := ioutil.ReadFile(INJECT_FILE) + dat, err := os.ReadFile(INJECT_FILE) if err != nil { return } @@ -244,7 +240,7 @@ func InjectTestVersion(t *testing.T, options *helm.Options) { t.Log("Using injected values:\n", string(dat)) - err, image := UnmarshalImageYAML(string(dat)) + image, err := UnmarshalImageYAML(string(dat)) require.NoError(t, err) if options.SetValues == nil { @@ -265,14 +261,14 @@ func InjectTestVersion(t *testing.T, options *helm.Options) { } func OverrideUpgradeContainerImage(t *testing.T, options *helm.Options) { - dat, err := ioutil.ReadFile(UPGRADE_INJECT_FILE) + dat, err := os.ReadFile(UPGRADE_INJECT_FILE) if err != nil { return } t.Log("Overriding upgrade container image with injected values:\n", string(dat)) - err, image := UnmarshalImageYAML(string(dat)) + image, err := UnmarshalImageYAML(string(dat)) require.NoError(t, err) if options.SetValues == nil { @@ -416,28 +412,6 @@ func AwaitE(t *testing.T, lmbd func() bool, timeout time.Duration) error { } } -func AwaitTillerUp(t *testing.T) { - version, err := 
helm.RunHelmCommandAndGetOutputE(t, &helm.Options{}, "version", "--short") - require.NoError(t, err) - - t.Logf("Using Helm %s", version) - - if strings.Contains(version, "v3.") { - return - } - - Await(t, func() bool { - for _, pod := range FindAllPodsInSchema(t, "kube-system") { - if strings.Contains(pod.Name, "tiller-deploy") { - if arePodConditionsMet(&pod, corev1.PodReady, corev1.ConditionTrue) { - return true - } - } - } - return false - }, 30*time.Second) -} - func AwaitNrReplicasScheduled(t *testing.T, namespace string, expectedName string, nrReplicas int) { // in multi-cluster tests the Pods won't be scheduled until disks are // provisioned which takes longer than in minikube; adjust the timeout if @@ -880,7 +854,7 @@ func VerifyLicenseIsCommunity(t *testing.T, namespace string, podName string) { } func VerifyLicensingErrorsInLog(t *testing.T, namespace string, podName string, expectError bool) { - buf, err := ioutil.ReadAll(getAppLogStream(t, namespace, podName, &corev1.PodLogOptions{})) + buf, err := io.ReadAll(getAppLogStream(t, namespace, podName, &corev1.PodLogOptions{})) require.NoError(t, err) fullLog := string(buf) @@ -889,7 +863,7 @@ func VerifyLicensingErrorsInLog(t *testing.T, namespace string, podName string, } func GetStringOccurrenceInLog(t *testing.T, namespace string, podName string, expectedLogLine string, podLogOptions *corev1.PodLogOptions) int { - buf, err := ioutil.ReadAll(getAppLogStream(t, namespace, podName, podLogOptions)) + buf, err := io.ReadAll(getAppLogStream(t, namespace, podName, podLogOptions)) require.NoError(t, err) fullLog := string(buf) @@ -899,7 +873,7 @@ func GetStringOccurrenceInLog(t *testing.T, namespace string, podName string, ex } func GetRegexOccurrenceInLog(t *testing.T, namespace string, podName string, expectedLogLine string, podLogOptions *corev1.PodLogOptions) int { - buf, err := ioutil.ReadAll(getAppLogStream(t, namespace, podName, podLogOptions)) + buf, err := io.ReadAll(getAppLogStream(t, namespace, podName, podLogOptions)) require.NoError(t, err) fullLog := string(buf) @@ -912,7 +886,7 @@ func GetRegexOccurrenceInLog(t *testing.T, namespace string, podName string, exp } func VerifyCertificateInLog(t *testing.T, namespace string, podName string, expectedLogLine string) { - buf, err := ioutil.ReadAll(getAppLogStream(t, namespace, podName, &corev1.PodLogOptions{})) + buf, err := io.ReadAll(getAppLogStream(t, namespace, podName, &corev1.PodLogOptions{})) require.NoError(t, err) fullLog := string(buf) @@ -1115,7 +1089,7 @@ func GetSecret(t *testing.T, namespace string, secretName string) *corev1.Secret return k8s.GetSecret(t, options, secretName) } -func GetDaemonSet(t *testing.T, namespace string, daemonSetName string) *v1.DaemonSet { +func GetDaemonSet(t *testing.T, namespace string, daemonSetName string) *appsv1.DaemonSet { options := k8s.NewKubectlOptions("", "", namespace) clientset, err := k8s.GetKubernetesClientFromOptionsE(t, options) @@ -1194,7 +1168,7 @@ func GetNuoDBK8sConfigDump(t *testing.T, namespace string, podName string) NuoDB k8s.RunKubectl(t, options, "cp", podName+":/tmp/nuodb-dump.json", targetFile) - content, err := ioutil.ReadFile(targetFile) + content, err := os.ReadFile(targetFile) require.NoError(t, err) err, unmarshalledDump := UnmarshalNuoDBKubeConfig(string(content)) require.NoError(t, err) @@ -1203,7 +1177,7 @@ func GetNuoDBK8sConfigDump(t *testing.T, namespace string, podName string) NuoDB } func ExecuteCommandsInPod(t *testing.T, namespaceName string, podName string, commands []string) { - tmpfile, err 
:= ioutil.TempFile("", "script") + tmpfile, err := os.CreateTemp("", "script") if err != nil { require.NoError(t, err) } @@ -1337,7 +1311,7 @@ func GetNamespaces(t *testing.T) []corev1.Namespace { return namespaces.Items } -func GetStatefulSets(t *testing.T, namespaceName string) *v1.StatefulSetList { +func GetStatefulSets(t *testing.T, namespaceName string) *appsv1.StatefulSetList { options := k8s.NewKubectlOptions("", "", namespaceName) clientset, err := k8s.GetKubernetesClientFromOptionsE(t, options) @@ -1348,7 +1322,7 @@ func GetStatefulSets(t *testing.T, namespaceName string) *v1.StatefulSetList { return statefulSets } -func GetStatefulSet(t *testing.T, namespaceName, name string) *v1.StatefulSet { +func GetStatefulSet(t *testing.T, namespaceName, name string) *appsv1.StatefulSet { options := k8s.NewKubectlOptions("", "", namespaceName) clientset, err := k8s.GetKubernetesClientFromOptionsE(t, options) @@ -1419,9 +1393,9 @@ func GetDatabaseLoadBalancerConfigE(t *testing.T, dbName string, loadBalancerCon } type NuoDBStatefulSets struct { - AdminSet v1.StatefulSet - SmNonHCSet v1.StatefulSet - SmHCSet v1.StatefulSet + AdminSet appsv1.StatefulSet + SmNonHCSet appsv1.StatefulSet + SmHCSet appsv1.StatefulSet } func FindAllStatefulSets(t *testing.T, namespaceName string) NuoDBStatefulSets { diff --git a/test/testlib/multicluster_utilities.go b/test/testlib/multicluster_utilities.go index d4aa1f2e7..153013cb6 100644 --- a/test/testlib/multicluster_utilities.go +++ b/test/testlib/multicluster_utilities.go @@ -5,9 +5,9 @@ import ( "encoding/json" "fmt" "io" - "io/ioutil" "math" "net" + "os" "sort" "strings" "testing" @@ -16,7 +16,7 @@ import ( "github.com/gruntwork-io/terratest/modules/helm" "github.com/gruntwork-io/terratest/modules/k8s" "github.com/stretchr/testify/require" - v1 "k8s.io/apimachinery/pkg/apis/meta/v1" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" ) const CONTEXT_CLUSTER_KEY = CONTEXT_KEY("cluster") @@ -90,7 +90,7 @@ func CopyMap(m map[string]string) map[string]string { * */ func InjectClusters(t *testing.T, cluster K8sCluster) K8sCluster { - dat, err := ioutil.ReadFile(INJECT_CLUSTERS_FILE) + dat, err := os.ReadFile(INJECT_CLUSTERS_FILE) if err != nil { return cluster } @@ -312,7 +312,7 @@ func updateDnsConfig(t *testing.T, ctx context.Context, kubectlOptions *k8s.Kube // Create K8s client and get configmap for CoreDNS clientset, err := k8s.GetKubernetesClientFromOptionsE(t, kubectlOptions) require.NoError(t, err, "Unable to create K8s client") - cm, err := clientset.CoreV1().ConfigMaps(COREDNS_NS).Get(ctx, COREDNS_CM, v1.GetOptions{}) + cm, err := clientset.CoreV1().ConfigMaps(COREDNS_NS).Get(ctx, COREDNS_CM, metav1.GetOptions{}) require.NoError(t, err, "Unable to get CoreDNS configmap") config, ok := cm.Data[COREFILE_KEY] require.True(t, ok, "Did not find key %s in CoreDNS configmap", COREFILE_KEY) @@ -329,7 +329,7 @@ func updateDnsConfig(t *testing.T, ctx context.Context, kubectlOptions *k8s.Kube } t.Logf("Adding DNS config snippet %s", dnsConfigSnippet) cm.Data[COREFILE_KEY] = updatedConfig - _, err = clientset.CoreV1().ConfigMaps(COREDNS_NS).Update(ctx, cm, v1.UpdateOptions{}) + _, err = clientset.CoreV1().ConfigMaps(COREDNS_NS).Update(ctx, cm, metav1.UpdateOptions{}) require.NoError(t, err) } diff --git a/test/testlib/nuodb_admin_utilities.go b/test/testlib/nuodb_admin_utilities.go index 223c4037b..68940a4a3 100644 --- a/test/testlib/nuodb_admin_utilities.go +++ b/test/testlib/nuodb_admin_utilities.go @@ -4,7 +4,6 @@ import ( "encoding/base64" "errors" "fmt" - 
"io/ioutil" "os" "path/filepath" "runtime" @@ -12,13 +11,12 @@ import ( "testing" "time" - v12 "k8s.io/api/core/v1" - - "github.com/Masterminds/semver" + "github.com/Masterminds/semver/v3" "github.com/gruntwork-io/terratest/modules/helm" "github.com/gruntwork-io/terratest/modules/k8s" "github.com/gruntwork-io/terratest/modules/random" "github.com/stretchr/testify/require" + corev1 "k8s.io/api/core/v1" ) func getFunctionCallerName() string { @@ -123,7 +121,7 @@ func StartAdminTemplate(t *testing.T, options *helm.Options, replicaCount int, n _ = k8s.RunKubectlE(t, kubectlOptions, "exec", adminName, "-c", "admin", "--", "bash", "-c", "pgrep -x java | xargs -r kill -3") } // collect logs - go GetAppLog(t, namespaceName, adminName, "", &v12.PodLogOptions{Follow: true}) + go GetAppLog(t, namespaceName, adminName, "", &corev1.PodLogOptions{Follow: true}) GetAdminEventLog(t, namespaceName, adminName) } }) @@ -239,7 +237,7 @@ func ApplyLicense(t *testing.T, namespace string, adminPod string, licenseType L if licenseContent != "" { licenseContentBytes, err := base64.StdEncoding.DecodeString(licenseContent) require.NoError(t, err) - tmpfile, err := ioutil.TempFile("", "license") + tmpfile, err := os.CreateTemp("", "license") require.NoError(t, err) defer os.Remove(tmpfile.Name()) _, err = tmpfile.Write(licenseContentBytes) diff --git a/test/testlib/nuodb_database_utilities.go b/test/testlib/nuodb_database_utilities.go index 791e8f3b5..d2aad51e8 100644 --- a/test/testlib/nuodb_database_utilities.go +++ b/test/testlib/nuodb_database_utilities.go @@ -15,14 +15,12 @@ import ( "text/template" "time" - corev1 "k8s.io/api/core/v1" - v12 "k8s.io/api/core/v1" - - "github.com/Masterminds/semver" + "github.com/Masterminds/semver/v3" "github.com/gruntwork-io/terratest/modules/helm" "github.com/gruntwork-io/terratest/modules/k8s" "github.com/gruntwork-io/terratest/modules/random" "github.com/stretchr/testify/require" + corev1 "k8s.io/api/core/v1" ) const UPGRADE_STRATEGY = ` @@ -219,7 +217,7 @@ func StartDatabaseTemplate(t *testing.T, namespaceName string, adminPod string, pods, _ := findPods(t, namespaceName, tePodNameTemplate) for _, tePod := range pods { t.Logf("Getting log from TE pod: %s", tePod.Name) - go GetAppLog(t, namespaceName, tePod.Name, "", &v12.PodLogOptions{Follow: true}) + go GetAppLog(t, namespaceName, tePod.Name, "", &corev1.PodLogOptions{Follow: true}) } }) // the TEs will become RUNNING after the SMs as they need an entry node @@ -236,7 +234,7 @@ func StartDatabaseTemplate(t *testing.T, namespaceName string, adminPod string, pods, _ := findPods(t, namespaceName, smPodNameTemplate) for _, smPod := range pods { t.Logf("Getting log from SM pod: %s", smPod.Name) - go GetAppLog(t, namespaceName, smPod.Name, "", &v12.PodLogOptions{Follow: true}) + go GetAppLog(t, namespaceName, smPod.Name, "", &corev1.PodLogOptions{Follow: true}) } }) AwaitPodUp(t, namespaceName, smPodName0, readyTimeout) @@ -282,11 +280,11 @@ func UpgradeDatabase(t *testing.T, namespaceName string, helmChartReleaseName st // get the OLD log tePodName := GetPodName(t, namespaceName, tePodNameTemplate) tePod := GetPod(t, namespaceName, tePodName) - go GetAppLog(t, namespaceName, tePodName, "-previous", &v12.PodLogOptions{Follow: true}) + go GetAppLog(t, namespaceName, tePodName, "-previous", &corev1.PodLogOptions{Follow: true}) smPodName0 := GetPodName(t, namespaceName, smPodName) smPod0 := GetPod(t, namespaceName, smPodName0) - go GetAppLog(t, namespaceName, smPodName0, "-previous", &v12.PodLogOptions{Follow: true}) + go 
GetAppLog(t, namespaceName, smPodName0, "-previous", &corev1.PodLogOptions{Follow: true}) AddDiagnosticTeardown(TEARDOWN_DATABASE, t, func() { _ = k8s.RunKubectlE(t, kubectlOptions, "exec", adminPod, "--", "nuocmd", "show", "domain") @@ -354,7 +352,7 @@ func RestoreDatabase(t *testing.T, namespaceName string, podName string, databas AddDiagnosticTeardown(TEARDOWN_RESTORE, t, func() { restorePodName := GetPodName(t, namespaceName, "restore-demo-") k8s.RunKubectl(t, kubectlOptions, "describe", "pod", restorePodName) - GetAppLog(t, namespaceName, restorePodName, "restore-job", &v12.PodLogOptions{}) + GetAppLog(t, namespaceName, restorePodName, "restore-job", &corev1.PodLogOptions{}) }) // Remove restore job if exist as it's not unique for a restore chart release k8s.RunKubectlE(t, kubectlOptions, "delete", "job", "restore-"+options.SetValues["database.name"]) @@ -403,7 +401,7 @@ func BackupDatabaseE( defer func() { // Get backup logs and delete the job if pod, err := FindPod(t, namespaceName, jobName); err == nil { - GetAppLog(t, namespaceName, pod.Name, "", &v12.PodLogOptions{Container: "nuodb"}) + GetAppLog(t, namespaceName, pod.Name, "", &corev1.PodLogOptions{Container: "nuodb"}) } k8s.RunKubectl(t, opts, "delete", "job", jobName) }() diff --git a/test/testlib/secrets.go b/test/testlib/secrets.go index 0ec69ba6e..3215425e1 100644 --- a/test/testlib/secrets.go +++ b/test/testlib/secrets.go @@ -4,13 +4,13 @@ import ( "bufio" "encoding/base64" "fmt" - "github.com/stretchr/testify/require" - "io/ioutil" + "io" "os" "path/filepath" "testing" "github.com/gruntwork-io/terratest/modules/k8s" + "github.com/stretchr/testify/require" ) const TLS_SECRET_PASSWORD_YAML_TEMPLATE = `--- @@ -44,7 +44,7 @@ func ReadAll(path string) ([]byte, error) { defer file.Close() reader := bufio.NewReader(file) - content, rerr := ioutil.ReadAll(reader) + content, rerr := io.ReadAll(reader) if rerr != nil { return nil, rerr } diff --git a/test/testlib/template_utilities.go b/test/testlib/template_utilities.go index 64aa7276a..47476131f 100644 --- a/test/testlib/template_utilities.go +++ b/test/testlib/template_utilities.go @@ -5,19 +5,18 @@ import ( "strings" "testing" - "k8s.io/api/batch/v1beta1" - "k8s.io/apimachinery/pkg/api/resource" - "github.com/google/go-cmp/cmp" "github.com/gruntwork-io/terratest/modules/helm" "github.com/stretchr/testify/assert" "github.com/stretchr/testify/require" appsv1 "k8s.io/api/apps/v1" batchv1 "k8s.io/api/batch/v1" - v1 "k8s.io/api/core/v1" + batchv1beta1 "k8s.io/api/batch/v1beta1" + corev1 "k8s.io/api/core/v1" networkingv1 "k8s.io/api/networking/v1" rbacv1 "k8s.io/api/rbac/v1" storagev1 "k8s.io/api/storage/v1" + "k8s.io/apimachinery/pkg/api/resource" ) func ArgContains(args []string, x string) bool { @@ -29,7 +28,7 @@ func ArgContains(args []string, x string) bool { return false } -func EnvGet(envs []v1.EnvVar, key string) (string, bool) { +func EnvGet(envs []corev1.EnvVar, key string) (string, bool) { for _, n := range envs { if n.Name == key { return n.Value, true @@ -38,25 +37,25 @@ func EnvGet(envs []v1.EnvVar, key string) (string, bool) { return "", false } -func AssertEnvNotContains(t *testing.T, envs []v1.EnvVar, key string) { +func AssertEnvNotContains(t *testing.T, envs []corev1.EnvVar, key string) { actual, ok := EnvGet(envs, key) assert.False(t, ok, "Unexpected environment variable %s=%s", key, actual) } -func AssertEnvContains(t *testing.T, envs []v1.EnvVar, key, expected string) { +func AssertEnvContains(t *testing.T, envs []corev1.EnvVar, key, expected string) { actual, 
ok := EnvGet(envs, key) assert.True(t, ok, "Environment variable %s not set", key) assert.Equal(t, expected, actual) } -func EnvContains(envs []v1.EnvVar, key string, expected string) bool { +func EnvContains(envs []corev1.EnvVar, key string, expected string) bool { if actual, ok := EnvGet(envs, key); ok { return actual == expected } return false } -func EnvContainsValueFrom(envs []v1.EnvVar, key string, valueFrom *v1.EnvVarSource) bool { +func EnvContainsValueFrom(envs []corev1.EnvVar, key string, valueFrom *corev1.EnvVarSource) bool { for _, n := range envs { if n.Name == key && cmp.Equal(n.ValueFrom, valueFrom) { return true @@ -65,7 +64,7 @@ func EnvContainsValueFrom(envs []v1.EnvVar, key string, valueFrom *v1.EnvVarSour return false } -func EnvFromSourceContains(envs []v1.EnvFromSource, value string) bool { +func EnvFromSourceContains(envs []corev1.EnvFromSource, value string) bool { for _, n := range envs { if n.ConfigMapRef.Name == value { return true @@ -74,7 +73,7 @@ func EnvFromSourceContains(envs []v1.EnvFromSource, value string) bool { return false } -func MountContains(mounts []v1.VolumeMount, expectedName string) bool { +func MountContains(mounts []corev1.VolumeMount, expectedName string) bool { for _, mount := range mounts { if mount.Name == expectedName { return true @@ -83,7 +82,7 @@ func MountContains(mounts []v1.VolumeMount, expectedName string) bool { return false } -func GetMount(mounts []v1.VolumeMount, expectedName string) (*v1.VolumeMount, bool) { +func GetMount(mounts []corev1.VolumeMount, expectedName string) (*corev1.VolumeMount, bool) { for _, mount := range mounts { if mount.Name == expectedName { return &mount, true @@ -92,7 +91,7 @@ func GetMount(mounts []v1.VolumeMount, expectedName string) (*v1.VolumeMount, bo return nil, false } -func VolumesContains(mounts []v1.Volume, expectedName string) bool { +func VolumesContains(mounts []corev1.Volume, expectedName string) bool { for _, mount := range mounts { if mount.Name == expectedName { return true @@ -117,7 +116,7 @@ func MapContains(actual map[string]string, expected map[string]string) (string, return "", true } -func GetVolume(volumes []v1.Volume, expectedName string) (*v1.Volume, bool) { +func GetVolume(volumes []corev1.Volume, expectedName string) (*corev1.Volume, bool) { for _, volume := range volumes { if volume.Name == expectedName { return &volume, true @@ -126,7 +125,7 @@ func GetVolume(volumes []v1.Volume, expectedName string) (*v1.Volume, bool) { return nil, false } -func GetVolumeClaim(vcp []v1.PersistentVolumeClaim, expectedName string) (*v1.PersistentVolumeClaim, bool) { +func GetVolumeClaim(vcp []corev1.PersistentVolumeClaim, expectedName string) (*corev1.PersistentVolumeClaim, bool) { for _, volume := range vcp { if volume.Name == expectedName { return &volume, true @@ -155,16 +154,16 @@ func SplitAndRender[T any](t *testing.T, output string, expectedNrObjects int, k return objects } -func SplitAndRenderPersistentVolumeClaim(t *testing.T, output string, expectedNrObjects int) []v1.PersistentVolumeClaim { - return SplitAndRender[v1.PersistentVolumeClaim](t, output, expectedNrObjects, "PersistentVolumeClaim") +func SplitAndRenderPersistentVolumeClaim(t *testing.T, output string, expectedNrObjects int) []corev1.PersistentVolumeClaim { + return SplitAndRender[corev1.PersistentVolumeClaim](t, output, expectedNrObjects, "PersistentVolumeClaim") } -func SplitAndRenderConfigMap(t *testing.T, output string, expectedNrObjects int) []v1.ConfigMap { - return SplitAndRender[v1.ConfigMap](t, output, 
expectedNrObjects, "ConfigMap") +func SplitAndRenderConfigMap(t *testing.T, output string, expectedNrObjects int) []corev1.ConfigMap { + return SplitAndRender[corev1.ConfigMap](t, output, expectedNrObjects, "ConfigMap") } -func SplitAndRenderCronJob(t *testing.T, output string, expectedNrObjects int) []v1beta1.CronJob { - return SplitAndRender[v1beta1.CronJob](t, output, expectedNrObjects, "CronJob") +func SplitAndRenderCronJob(t *testing.T, output string, expectedNrObjects int) []batchv1beta1.CronJob { + return SplitAndRender[batchv1beta1.CronJob](t, output, expectedNrObjects, "CronJob") } func SplitAndRenderDaemonSet(t *testing.T, output string, expectedNrObjects int) []appsv1.DaemonSet { @@ -179,16 +178,16 @@ func SplitAndRenderDeployment(t *testing.T, output string, expectedNrObjects int return SplitAndRender[appsv1.Deployment](t, output, expectedNrObjects, "Deployment") } -func SplitAndRenderReplicationController(t *testing.T, output string, expectedNrObjects int) []v1.ReplicationController { - return SplitAndRender[v1.ReplicationController](t, output, expectedNrObjects, "ReplicationController") +func SplitAndRenderReplicationController(t *testing.T, output string, expectedNrObjects int) []corev1.ReplicationController { + return SplitAndRender[corev1.ReplicationController](t, output, expectedNrObjects, "ReplicationController") } -func SplitAndRenderSecret(t *testing.T, output string, expectedNrObjects int) []v1.Secret { - return SplitAndRender[v1.Secret](t, output, expectedNrObjects, "Secret") +func SplitAndRenderSecret(t *testing.T, output string, expectedNrObjects int) []corev1.Secret { + return SplitAndRender[corev1.Secret](t, output, expectedNrObjects, "Secret") } -func SplitAndRenderService(t *testing.T, output string, expectedNrObjects int) []v1.Service { - return SplitAndRender[v1.Service](t, output, expectedNrObjects, "Service") +func SplitAndRenderService(t *testing.T, output string, expectedNrObjects int) []corev1.Service { + return SplitAndRender[corev1.Service](t, output, expectedNrObjects, "Service") } func SplitAndRenderStatefulSet(t *testing.T, output string, expectedNrObjects int) []appsv1.StatefulSet { @@ -211,8 +210,8 @@ func SplitAndRenderClusterClusterRoleBinding(t *testing.T, output string, expect return SplitAndRender[rbacv1.ClusterRoleBinding](t, output, expectedNrObjects, "ClusterRoleBinding") } -func SplitAndRenderServiceAccount(t *testing.T, output string, expectedNrObjects int) []v1.ServiceAccount { - return SplitAndRender[v1.ServiceAccount](t, output, expectedNrObjects, "ServiceAccount") +func SplitAndRenderServiceAccount(t *testing.T, output string, expectedNrObjects int) []corev1.ServiceAccount { + return SplitAndRender[corev1.ServiceAccount](t, output, expectedNrObjects, "ServiceAccount") } func SplitAndRenderIngress(t *testing.T, output string, expectedNrObjects int) []networkingv1.Ingress { diff --git a/test/testlib/tls.go b/test/testlib/tls.go index 66ea07a74..1a6bed58c 100644 --- a/test/testlib/tls.go +++ b/test/testlib/tls.go @@ -2,7 +2,6 @@ package testlib import ( "fmt" - "io/ioutil" "os" "path/filepath" "strconv" @@ -10,12 +9,11 @@ import ( "testing" "time" - "github.com/stretchr/testify/require" - "github.com/gruntwork-io/terratest/modules/helm" "github.com/gruntwork-io/terratest/modules/k8s" "github.com/gruntwork-io/terratest/modules/random" "github.com/otiai10/copy" + "github.com/stretchr/testify/require" ) const TLS_GENERATOR_POD_TEMPLATE = `--- @@ -56,7 +54,7 @@ func verifyCertificateFiles(t *testing.T, directory string) { NUOCMD_FILE, } - 
files, err := ioutil.ReadDir(directory) + files, err := os.ReadDir(directory) require.NoError(t, err) set := make(map[string]bool) @@ -85,7 +83,7 @@ func CopyCertificatesToControlHost(t *testing.T, podName string, namespaceName s options := k8s.NewKubectlOptions("", "", namespaceName) prefix := "tls-keys" - targetDirectory, err := ioutil.TempDir("", prefix) + targetDirectory, err := os.MkdirTemp("", prefix) require.NoError(t, err, "Unable to create TMP directory with prefix ", prefix) AddTeardown(TEARDOWN_SECRETS, func() { os.RemoveAll(targetDirectory) })
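
A note on the io/ioutil removals above: ioutil has been deprecated since Go
1.16, and every call site in this patch moves to the drop-in replacement in
io or os. The sketch below is illustrative only (the file and directory names
are invented, not taken from the test suite) and compiles as a standalone
program, assuming Go 1.16 or later:

    package main

    import (
    	"fmt"
    	"io"
    	"os"
    	"strings"
    )

    func main() {
    	// io.ReadAll replaces ioutil.ReadAll for draining a stream,
    	// e.g. a pod log stream.
    	buf, err := io.ReadAll(strings.NewReader("pod log contents"))
    	if err != nil {
    		panic(err)
    	}
    	fmt.Println(len(buf))

    	// os.CreateTemp replaces ioutil.TempFile, and os.MkdirTemp
    	// replaces ioutil.TempDir.
    	tmpfile, err := os.CreateTemp("", "script")
    	if err != nil {
    		panic(err)
    	}
    	defer os.Remove(tmpfile.Name())

    	// os.ReadFile replaces ioutil.ReadFile.
    	content, err := os.ReadFile(tmpfile.Name())
    	if err != nil {
    		panic(err)
    	}
    	fmt.Println(len(content))

    	// os.ReadDir replaces ioutil.ReadDir. It returns []fs.DirEntry
    	// rather than []fs.FileInfo, which appears compatible with the
    	// verifyCertificateFiles change above, since the surrounding
    	// context only records entry names.
    	entries, err := os.ReadDir(os.TempDir())
    	if err != nil {
    		panic(err)
    	}
    	for _, e := range entries {
    		fmt.Println(e.Name())
    	}
    }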
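
On the Masterminds/semver upgrade in the hunks above: v3 lives at a new
module path but keeps the package name semver, so most call sites only need
the import line changed. A minimal sketch of constraint checking against the
v3 API (the version strings are invented for illustration):

    package main

    import (
    	"fmt"

    	"github.com/Masterminds/semver/v3"
    )

    func main() {
    	v := semver.MustParse("4.2.1")
    	c, err := semver.NewConstraint(">= 4.0")
    	if err != nil {
    		panic(err)
    	}
    	fmt.Println(c.Check(v)) // prints: true
    }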
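
The import reshuffling follows the standard Golang convention enforced by
goimports: standard library imports in the first group, third-party packages
in a second group, with consistent descriptive aliases for the Kubernetes API
groups in place of collision-generated names like v1 and v12. A sketch of the
resulting convention (the blank identifiers stand in for real usages so the
fragment compiles):

    package testlib

    import (
    	appsv1 "k8s.io/api/apps/v1"
    	batchv1beta1 "k8s.io/api/batch/v1beta1"
    	corev1 "k8s.io/api/core/v1"
    	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
    )

    // Call sites now read as corev1.PodLogOptions, appsv1.StatefulSet,
    // batchv1beta1.CronJob, and metav1.GetOptions.
    var (
    	_ = corev1.PodLogOptions{}
    	_ = appsv1.StatefulSet{}
    	_ = batchv1beta1.CronJob{}
    	_ = metav1.GetOptions{}
    )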
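
Those aliases pair with client-go's typed clientset, as in the GetDaemonSet
change above. The helper below is a hypothetical reconstruction consistent
with the visible hunk (the function name is mine, and only the first lines of
the real body appear in the diff):

    package testlib

    import (
    	"context"
    	"testing"

    	"github.com/gruntwork-io/terratest/modules/k8s"
    	"github.com/stretchr/testify/require"
    	appsv1 "k8s.io/api/apps/v1"
    	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
    )

    func GetDaemonSetSketch(t *testing.T, namespace string, name string) *appsv1.DaemonSet {
    	options := k8s.NewKubectlOptions("", "", namespace)
    	clientset, err := k8s.GetKubernetesClientFromOptionsE(t, options)
    	require.NoError(t, err)

    	ds, err := clientset.AppsV1().DaemonSets(namespace).Get(context.TODO(), name, metav1.GetOptions{})
    	require.NoError(t, err)
    	return ds
    }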