From ef05a4a4654e748ba8e0e8ec758a7691b57a2f27 Mon Sep 17 00:00:00 2001
From: Jordan
Date: Sat, 28 Dec 2024 23:11:53 -0600
Subject: [PATCH] manual test fixes

Signed-off-by: Jordan
---
 .golangci.yaml | 9 ++
 alpha/action/migrate_test.go | 2 +-
 alpha/action/migrations/migrations_test.go | 2 +-
 alpha/action/render_test.go | 17 +-
 alpha/declcfg/declcfg_to_model_test.go | 2 +-
 alpha/declcfg/helpers_test.go | 8 +-
 alpha/declcfg/load_benchmark_test.go | 21 ++-
 alpha/declcfg/load_test.go | 20 +--
 alpha/declcfg/model_to_declcfg_test.go | 3 +-
 alpha/declcfg/write_test.go | 4 +-
 pkg/api/conversion_test.go | 14 +-
 pkg/cache/cache_test.go | 26 +--
 pkg/cache/tar_test.go | 3 +-
 pkg/configmap/configmap_test.go | 35 ++--
 .../dockerfilegenerator_test.go | 4 +-
 pkg/containertools/labelreader_test.go | 8 +-
 pkg/image/registry_test.go | 12 +-
 pkg/lib/bundle/exporter_test.go | 8 +-
 pkg/lib/bundle/generate_test.go | 26 +--
 pkg/lib/bundle/utils_test.go | 17 +-
 pkg/lib/bundle/validate_test.go | 2 +-
 pkg/lib/dns/nsswitch_test.go | 2 +-
 pkg/lib/indexer/indexer_test.go | 4 +-
 pkg/lib/registry/registry_test.go | 36 +++--
 pkg/mirror/mirror_test.go | 14 +-
 .../prettyunmarshaler_test.go | 15 +-
 pkg/registry/bundle_test.go | 2 +
 pkg/registry/csv_test.go | 152 +++++++++---------
 pkg/registry/decode_test.go | 14 +-
 pkg/registry/helper_test.go | 6 +-
 pkg/registry/parse_test.go | 4 +-
 pkg/registry/populator_test.go | 85 +++++-----
 pkg/registry/types_test.go | 10 +-
 pkg/server/server_test.go | 45 +++---
 pkg/sqlite/configmap_test.go | 36 +++--
 pkg/sqlite/conversion_test.go | 22 +--
 pkg/sqlite/directory_test.go | 11 +-
 pkg/sqlite/graphloader_test.go | 10 +-
 pkg/sqlite/load_test.go | 30 ++--
 .../migrations/001_related_images_test.go | 18 +--
 pkg/sqlite/migrations/002_bundle_path_test.go | 7 +-
 .../migrations/003_required_apis_test.go | 20 +--
 .../migrations/004_cascade_delete_test.go | 42 ++---
 .../migrations/005_version_skiprange_test.go | 12 +-
 .../006_associate_apis_with_bundle_test.go | 37 ++---
 .../migrations/007_replaces_skips_test.go | 20 +--
 .../migrations/008_dependencies_test.go | 10 +-
 pkg/sqlite/migrations/009_properties_test.go | 10 +-
 .../010_set_bundlepath_pkg_property_test.go | 4 +-
 .../migrations/011_substitutes_for_test.go | 4 +-
 pkg/sqlite/migrations/012_deprecated_test.go | 2 +-
 .../013_rm_truncated_deprecations_test.go | 3 +-
 pkg/sqlite/migrator_test.go | 6 +-
 pkg/sqlite/query_sql_test.go | 2 +-
 pkg/sqlite/remove_test.go | 2 +-
 pkg/sqlite/stranded_test.go | 6 +-
 test/e2e/bundle_image_test.go | 2 +-
 57 files changed, 500 insertions(+), 448 deletions(-)

diff --git a/.golangci.yaml b/.golangci.yaml
index 0be71e40a..f3ef4fb4d 100644
--- a/.golangci.yaml
+++ b/.golangci.yaml
@@ -68,6 +68,15 @@ linters-settings:
       - pkg: github.com/blang/semver/v4
         alias: bsemver
 
+issues:
+  # exclusion rules, mostly to avoid functionally complete areas and reduce unit test noise
+  exclude-rules:
+    - path: _test\.go
+      linters:
+        - unused
+        - govet
+
+
 output:
   formats:
     - format: tab
diff --git a/alpha/action/migrate_test.go b/alpha/action/migrate_test.go
index 5ffcc66b8..be7d90858 100644
--- a/alpha/action/migrate_test.go
+++ b/alpha/action/migrate_test.go
@@ -125,7 +125,7 @@ func TestMigrate(t *testing.T) {
 				return
 			}
 			actualFS := os.DirFS(s.migrate.OutputDir)
-			fs.WalkDir(actualFS, ".", func(path string, d fs.DirEntry, err error) error {
+			_ = fs.WalkDir(actualFS, ".", func(path string, d fs.DirEntry, err error) error {
 				require.NoError(t, err)
 				if d.IsDir() {
 					return nil
diff --git a/alpha/action/migrations/migrations_test.go
b/alpha/action/migrations/migrations_test.go index c25b96a47..d3b1ad074 100644 --- a/alpha/action/migrations/migrations_test.go +++ b/alpha/action/migrations/migrations_test.go @@ -59,7 +59,7 @@ func TestMigrations(t *testing.T) { for _, test := range tests { t.Run(test.name, func(t *testing.T) { - var config declcfg.DeclarativeConfig = unmigratedCatalogFBC() + var config = unmigratedCatalogFBC() for _, m := range test.migrators.Migrations { err := m.Migrate(&config) diff --git a/alpha/action/render_test.go b/alpha/action/render_test.go index fa885ee2e..72fdd8a4e 100644 --- a/alpha/action/render_test.go +++ b/alpha/action/render_test.go @@ -4,7 +4,6 @@ import ( "context" "embed" "encoding/json" - "errors" "fmt" "io/fs" "os" @@ -13,7 +12,6 @@ import ( "testing/fstest" "text/template" - "github.com/stretchr/testify/assert" "github.com/stretchr/testify/require" "k8s.io/apimachinery/pkg/util/yaml" @@ -86,7 +84,7 @@ func TestRender(t *testing.T) { image.SimpleReference("test.registry/foo-operator/foo-bundle:v0.1.0"): "testdata/foo-bundle-v0.1.0", image.SimpleReference("test.registry/foo-operator/foo-bundle:v0.2.0"): "testdata/foo-bundle-v0.2.0", } - assert.NoError(t, generateSqliteFile(dbFile, imageMap)) + require.NoError(t, generateSqliteFile(dbFile, imageMap)) testMigrations := migrations.Migrations{ Migrations: []migrations.Migration{ fauxMigration{"faux-migration", "my help text", func(d *declcfg.DeclarativeConfig) error { @@ -1235,7 +1233,7 @@ func TestAllowRefMask(t *testing.T) { image.SimpleReference("test.registry/foo-operator/foo-bundle:v0.1.0"): "testdata/foo-bundle-v0.1.0", image.SimpleReference("test.registry/foo-operator/foo-bundle:v0.2.0"): "testdata/foo-bundle-v0.2.0", } - assert.NoError(t, generateSqliteFile(dbFile, imageMap)) + require.NoError(t, generateSqliteFile(dbFile, imageMap)) specs := []spec{ { @@ -1365,18 +1363,17 @@ func TestAllowRefMask(t *testing.T) { for _, s := range specs { t.Run(s.name, func(t *testing.T) { _, err := s.render.Run(context.Background()) - require.True(t, errors.Is(err, s.expectErr), "expected error %#v to be %#v", err, s.expectErr) + require.ErrorIs(t, err, s.expectErr, "expected error %#v to be %#v", err, s.expectErr) }) } } func TestAllowRefMaskAllowed(t *testing.T) { type spec struct { - name string - mask action.RefType - pass []action.RefType - fail []action.RefType - expect bool + name string + mask action.RefType + pass []action.RefType + fail []action.RefType } specs := []spec{ diff --git a/alpha/declcfg/declcfg_to_model_test.go b/alpha/declcfg/declcfg_to_model_test.go index 97250efca..de8639c1b 100644 --- a/alpha/declcfg/declcfg_to_model_test.go +++ b/alpha/declcfg/declcfg_to_model_test.go @@ -498,7 +498,7 @@ func TestConvertToModelRoundtrip(t *testing.T) { assert.Equal(t, expected.Packages, actual.Packages) assert.Equal(t, expected.Bundles, actual.Bundles) - assert.Len(t, actual.Others, 0, "expected unrecognized schemas not to make the roundtrip") + assert.Empty(t, actual.Others, "expected unrecognized schemas not to make the roundtrip") } func hasError(expectedError string) require.ErrorAssertionFunc { diff --git a/alpha/declcfg/helpers_test.go b/alpha/declcfg/helpers_test.go index ff39b7d5f..1d55f9e2a 100644 --- a/alpha/declcfg/helpers_test.go +++ b/alpha/declcfg/helpers_test.go @@ -146,7 +146,7 @@ func withNoBundleData() func(*Bundle) { } func newTestBundle(packageName, version string, opts ...bundleOpt) Bundle { - csvJson := fmt.Sprintf(`{"kind": "ClusterServiceVersion", "apiVersion": "operators.coreos.com/v1alpha1", 
"metadata":{"name":%q}}`, testBundleName(packageName, version)) + csvJSON := fmt.Sprintf(`{"kind": "ClusterServiceVersion", "apiVersion": "operators.coreos.com/v1alpha1", "metadata":{"name":%q}}`, testBundleName(packageName, version)) b := Bundle{ Schema: SchemaBundle, Name: testBundleName(packageName, version), @@ -154,7 +154,7 @@ func newTestBundle(packageName, version string, opts ...bundleOpt) Bundle { Image: testBundleImage(packageName, version), Properties: []property.Property{ property.MustBuildPackage(packageName, version), - property.MustBuildBundleObject([]byte(csvJson)), + property.MustBuildBundleObject([]byte(csvJSON)), property.MustBuildBundleObject([]byte(`{"kind": "CustomResourceDefinition", "apiVersion": "apiextensions.k8s.io/v1"}`)), }, RelatedImages: []RelatedImage{ @@ -163,9 +163,9 @@ func newTestBundle(packageName, version string, opts ...bundleOpt) Bundle { Image: testBundleImage(packageName, version), }, }, - CsvJSON: csvJson, + CsvJSON: csvJSON, Objects: []string{ - csvJson, + csvJSON, `{"kind": "CustomResourceDefinition", "apiVersion": "apiextensions.k8s.io/v1"}`, }, } diff --git a/alpha/declcfg/load_benchmark_test.go b/alpha/declcfg/load_benchmark_test.go index a99c95c61..46f3138be 100644 --- a/alpha/declcfg/load_benchmark_test.go +++ b/alpha/declcfg/load_benchmark_test.go @@ -2,9 +2,10 @@ package declcfg_test import ( "context" + "crypto/rand" "encoding/base64" "fmt" - "math/rand" + "math/big" "os" "runtime" "testing" @@ -65,7 +66,11 @@ func generateFBC(b *testing.B, numPackages, numChannels, numBundles int) *declcf }) } for i := 0; i < numChannels; i++ { - pkgName := fbc.Packages[rand.Intn(numPackages)].Name + r, err := rand.Int(rand.Reader, big.NewInt(int64(numPackages))) + if err != nil { + b.Error(err) + } + pkgName := fbc.Packages[r.Int64()].Name channelName := fmt.Sprintf("channel-%d", i) fbc.Channels = append(fbc.Channels, declcfg.Channel{ Schema: declcfg.SchemaChannel, @@ -74,7 +79,11 @@ func generateFBC(b *testing.B, numPackages, numChannels, numBundles int) *declcf }) } for i := 0; i < numBundles; i++ { - pkgName := fbc.Packages[rand.Intn(numPackages)].Name + r, err := rand.Int(rand.Reader, big.NewInt(int64(numPackages))) + if err != nil { + b.Error(err) + } + pkgName := fbc.Packages[r.Int64()].Name bundleName := fmt.Sprintf("bundle-%d", i) version := fmt.Sprintf("0.%d.0", i) bundle := declcfg.Bundle{ @@ -94,7 +103,11 @@ func generateFBC(b *testing.B, numPackages, numChannels, numBundles int) *declcf bundle.Properties = append(bundle.Properties, property.MustBuildCSVMetadata(csv)) fbc.Bundles = append(fbc.Bundles, bundle) - chIdx := rand.Intn(numChannels) + p, err := rand.Int(rand.Reader, big.NewInt(int64(numChannels))) + if err != nil { + b.Error(err) + } + chIdx := p.Int64() ch := fbc.Channels[chIdx] replaces := "" if len(ch.Entries) > 0 { diff --git a/alpha/declcfg/load_test.go b/alpha/declcfg/load_test.go index 9f8f90fda..392fbd795 100644 --- a/alpha/declcfg/load_test.go +++ b/alpha/declcfg/load_test.go @@ -989,9 +989,9 @@ func TestLoadFile(t *testing.T) { path: "unrecognized-schema.json", assertion: require.NoError, expect: func(t *testing.T, d *DeclarativeConfig) { - require.Equal(t, 1, len(d.Packages)) - require.Equal(t, 1, len(d.Bundles)) - require.Equal(t, 1, len(d.Others)) + require.Len(t, d.Packages, 1) + require.Len(t, d.Bundles, 1) + require.Len(t, d.Others, 1) }, }, { @@ -1000,9 +1000,9 @@ func TestLoadFile(t *testing.T) { path: "etcd.yaml", assertion: require.NoError, expect: func(t *testing.T, d *DeclarativeConfig) { - require.Equal(t, 1, 
len(d.Packages)) - require.Equal(t, 6, len(d.Bundles)) - require.Equal(t, 0, len(d.Others)) + require.Len(t, d.Packages, 1) + require.Len(t, d.Bundles, 6) + require.Empty(t, d.Others) }, }, { @@ -1011,10 +1011,10 @@ func TestLoadFile(t *testing.T) { path: "deprecations.yaml", assertion: require.NoError, expect: func(t *testing.T, d *DeclarativeConfig) { - require.Equal(t, 0, len(d.Packages)) - require.Equal(t, 0, len(d.Bundles)) - require.Equal(t, 0, len(d.Others)) - require.Equal(t, 1, len(d.Deprecations)) + require.Empty(t, d.Packages) + require.Empty(t, d.Bundles) + require.Empty(t, d.Others) + require.Len(t, d.Deprecations, 1) }, }, } diff --git a/alpha/declcfg/model_to_declcfg_test.go b/alpha/declcfg/model_to_declcfg_test.go index b5e18dd03..07fe7d577 100644 --- a/alpha/declcfg/model_to_declcfg_test.go +++ b/alpha/declcfg/model_to_declcfg_test.go @@ -4,6 +4,7 @@ import ( "testing" "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" "github.com/operator-framework/operator-registry/alpha/model" ) @@ -26,7 +27,7 @@ func TestConvertFromModel(t *testing.T) { for _, s := range specs { t.Run(s.name, func(t *testing.T) { s.m.Normalize() - assert.NoError(t, s.m.Validate()) + require.NoError(t, s.m.Validate()) actual := ConvertFromModel(s.m) removeJSONWhitespace(&s.expectCfg) diff --git a/alpha/declcfg/write_test.go b/alpha/declcfg/write_test.go index 9e98e4c54..eca428768 100644 --- a/alpha/declcfg/write_test.go +++ b/alpha/declcfg/write_test.go @@ -500,13 +500,13 @@ func removeJSONWhitespace(cfg *DeclarativeConfig) { for ib := range cfg.Bundles { for ip := range cfg.Bundles[ib].Properties { var buf bytes.Buffer - json.Compact(&buf, cfg.Bundles[ib].Properties[ip].Value) + _ = json.Compact(&buf, cfg.Bundles[ib].Properties[ip].Value) cfg.Bundles[ib].Properties[ip].Value = buf.Bytes() } } for io := range cfg.Others { var buf bytes.Buffer - json.Compact(&buf, cfg.Others[io].Blob) + _ = json.Compact(&buf, cfg.Others[io].Blob) cfg.Others[io].Blob = buf.Bytes() } } diff --git a/pkg/api/conversion_test.go b/pkg/api/conversion_test.go index 011c58298..3dee1c0a5 100644 --- a/pkg/api/conversion_test.go +++ b/pkg/api/conversion_test.go @@ -39,7 +39,7 @@ func TestConvertModelBundleToAPIBundle(t *testing.T) { } const ( - csvJson = "{\"apiVersion\":\"operators.coreos.com/v1alpha1\",\"kind\":\"ClusterServiceVersion\",\"metadata\":{\"annotations\":{\"alm-examples\":\"[\\n {\\n \\\"apiVersion\\\": \\\"etcd.database.coreos.com/v1beta2\\\",\\n \\\"kind\\\": \\\"EtcdCluster\\\",\\n \\\"metadata\\\": {\\n \\\"name\\\": \\\"example\\\"\\n },\\n \\\"spec\\\": {\\n \\\"size\\\": 3,\\n \\\"version\\\": \\\"3.2.13\\\"\\n }\\n },\\n {\\n \\\"apiVersion\\\": \\\"etcd.database.coreos.com/v1beta2\\\",\\n \\\"kind\\\": \\\"EtcdRestore\\\",\\n \\\"metadata\\\": {\\n \\\"name\\\": \\\"example-etcd-cluster-restore\\\"\\n },\\n \\\"spec\\\": {\\n \\\"etcdCluster\\\": {\\n \\\"name\\\": \\\"example-etcd-cluster\\\"\\n },\\n \\\"backupStorageType\\\": \\\"S3\\\",\\n \\\"s3\\\": {\\n \\\"path\\\": \\\"\\u003cfull-s3-path\\u003e\\\",\\n \\\"awsSecret\\\": \\\"\\u003caws-secret\\u003e\\\"\\n }\\n }\\n },\\n {\\n \\\"apiVersion\\\": \\\"etcd.database.coreos.com/v1beta2\\\",\\n \\\"kind\\\": \\\"EtcdBackup\\\",\\n \\\"metadata\\\": {\\n \\\"name\\\": \\\"example-etcd-cluster-backup\\\"\\n },\\n \\\"spec\\\": {\\n \\\"etcdEndpoints\\\": [\\\"\\u003cetcd-cluster-endpoints\\u003e\\\"],\\n \\\"storageType\\\":\\\"S3\\\",\\n \\\"s3\\\": {\\n \\\"path\\\": \\\"\\u003cfull-s3-path\\u003e\\\",\\n \\\"awsSecret\\\": 
\\\"\\u003caws-secret\\u003e\\\"\\n }\\n }\\n }\\n]\\n\",\"capabilities\":\"Full Lifecycle\",\"categories\":\"Database\",\"containerImage\":\"quay.io/coreos/etcd-operator@sha256:66a37fd61a06a43969854ee6d3e21087a98b93838e284a6086b13917f96b0d9b\",\"createdAt\":\"2019-02-28 01:03:00\",\"description\":\"Create and maintain highly-available etcd clusters on Kubernetes\",\"repository\":\"https://github.com/coreos/etcd-operator\",\"tectonic-visibility\":\"ocs\"},\"name\":\"etcdoperator.v0.9.4\",\"namespace\":\"placeholder\"},\"spec\":{\"relatedImages\":[{\"name\":\"etcdv0.9.4\",\"image\":\"quay.io/coreos/etcd-operator@sha256:66a37fd61a06a43969854ee6d3e21087a98b93838e284a6086b13917f96b0d9b\"}],\"customresourcedefinitions\":{\"owned\":[{\"description\":\"Represents a cluster of etcd nodes.\",\"displayName\":\"etcd Cluster\",\"kind\":\"EtcdCluster\",\"name\":\"etcdclusters.etcd.database.coreos.com\",\"resources\":[{\"kind\":\"Service\",\"version\":\"v1\"},{\"kind\":\"Pod\",\"version\":\"v1\"}],\"specDescriptors\":[{\"description\":\"The desired number of member Pods for the etcd cluster.\",\"displayName\":\"Size\",\"path\":\"size\",\"x-descriptors\":[\"urn:alm:descriptor:com.tectonic.ui:podCount\"]},{\"description\":\"Limits describes the minimum/maximum amount of compute resources required/allowed\",\"displayName\":\"Resource Requirements\",\"path\":\"pod.resources\",\"x-descriptors\":[\"urn:alm:descriptor:com.tectonic.ui:resourceRequirements\"]}],\"statusDescriptors\":[{\"description\":\"The status of each of the member Pods for the etcd cluster.\",\"displayName\":\"Member Status\",\"path\":\"members\",\"x-descriptors\":[\"urn:alm:descriptor:com.tectonic.ui:podStatuses\"]},{\"description\":\"The service at which the running etcd cluster can be accessed.\",\"displayName\":\"Service\",\"path\":\"serviceName\",\"x-descriptors\":[\"urn:alm:descriptor:io.kubernetes:Service\"]},{\"description\":\"The current size of the etcd cluster.\",\"displayName\":\"Cluster Size\",\"path\":\"size\"},{\"description\":\"The current version of the etcd cluster.\",\"displayName\":\"Current Version\",\"path\":\"currentVersion\"},{\"description\":\"The target version of the etcd cluster, after upgrading.\",\"displayName\":\"Target Version\",\"path\":\"targetVersion\"},{\"description\":\"The current status of the etcd cluster.\",\"displayName\":\"Status\",\"path\":\"phase\",\"x-descriptors\":[\"urn:alm:descriptor:io.kubernetes.phase\"]},{\"description\":\"Explanation for the current status of the cluster.\",\"displayName\":\"Status Details\",\"path\":\"reason\",\"x-descriptors\":[\"urn:alm:descriptor:io.kubernetes.phase:reason\"]}],\"version\":\"v1beta2\"},{\"description\":\"Represents the intent to backup an etcd cluster.\",\"displayName\":\"etcd Backup\",\"kind\":\"EtcdBackup\",\"name\":\"etcdbackups.etcd.database.coreos.com\",\"specDescriptors\":[{\"description\":\"Specifies the endpoints of an etcd cluster.\",\"displayName\":\"etcd Endpoint(s)\",\"path\":\"etcdEndpoints\",\"x-descriptors\":[\"urn:alm:descriptor:etcd:endpoint\"]},{\"description\":\"The full AWS S3 path where the backup is saved.\",\"displayName\":\"S3 Path\",\"path\":\"s3.path\",\"x-descriptors\":[\"urn:alm:descriptor:aws:s3:path\"]},{\"description\":\"The name of the secret object that stores the AWS credential and config files.\",\"displayName\":\"AWS Secret\",\"path\":\"s3.awsSecret\",\"x-descriptors\":[\"urn:alm:descriptor:io.kubernetes:Secret\"]}],\"statusDescriptors\":[{\"description\":\"Indicates if the backup was 
successful.\",\"displayName\":\"Succeeded\",\"path\":\"succeeded\",\"x-descriptors\":[\"urn:alm:descriptor:text\"]},{\"description\":\"Indicates the reason for any backup related failures.\",\"displayName\":\"Reason\",\"path\":\"reason\",\"x-descriptors\":[\"urn:alm:descriptor:io.kubernetes.phase:reason\"]}],\"version\":\"v1beta2\"},{\"description\":\"Represents the intent to restore an etcd cluster from a backup.\",\"displayName\":\"etcd Restore\",\"kind\":\"EtcdRestore\",\"name\":\"etcdrestores.etcd.database.coreos.com\",\"specDescriptors\":[{\"description\":\"References the EtcdCluster which should be restored,\",\"displayName\":\"etcd Cluster\",\"path\":\"etcdCluster.name\",\"x-descriptors\":[\"urn:alm:descriptor:io.kubernetes:EtcdCluster\",\"urn:alm:descriptor:text\"]},{\"description\":\"The full AWS S3 path where the backup is saved.\",\"displayName\":\"S3 Path\",\"path\":\"s3.path\",\"x-descriptors\":[\"urn:alm:descriptor:aws:s3:path\"]},{\"description\":\"The name of the secret object that stores the AWS credential and config files.\",\"displayName\":\"AWS Secret\",\"path\":\"s3.awsSecret\",\"x-descriptors\":[\"urn:alm:descriptor:io.kubernetes:Secret\"]}],\"statusDescriptors\":[{\"description\":\"Indicates if the restore was successful.\",\"displayName\":\"Succeeded\",\"path\":\"succeeded\",\"x-descriptors\":[\"urn:alm:descriptor:text\"]},{\"description\":\"Indicates the reason for any restore related failures.\",\"displayName\":\"Reason\",\"path\":\"reason\",\"x-descriptors\":[\"urn:alm:descriptor:io.kubernetes.phase:reason\"]}],\"version\":\"v1beta2\"}]},\"description\":\"The etcd Operater creates and maintains highly-available etcd clusters on Kubernetes, allowing engineers to easily deploy and manage etcd clusters for their applications.\\n\\netcd is a distributed key value store that provides a reliable way to store data across a cluster of machines. It’s open-source and available on GitHub. etcd gracefully handles leader elections during network partitions and will tolerate machine failure, including the leader.\\n\\n\\n### Reading and writing to etcd\\n\\nCommunicate with etcd though its command line utility `etcdctl` via port forwarding:\\n\\n $ kubectl --namespace default port-forward service/example-client 2379:2379\\n $ etcdctl --endpoints http://127.0.0.1:2379 get /\\n\\nOr directly to the API using the Kubernetes Service:\\n\\n $ etcdctl --endpoints http://example-client.default.svc:2379 get /\\n\\nBe sure to secure your etcd cluster (see Common Configurations) before exposing it outside of the namespace or cluster.\\n\\n\\n### Supported Features\\n\\n* **High availability** - Multiple instances of etcd are networked together and secured. Individual failures or networking issues are transparently handled to keep your cluster up and running.\\n\\n* **Automated updates** - Rolling out a new etcd version works like all Kubernetes rolling updates. 
Simply declare the desired version, and the etcd service starts a safe rolling update to the new version automatically.\\n\\n* **Backups included** - Create etcd backups and restore them through the etcd Operator.\\n\\n### Common Configurations\\n\\n* **Configure TLS** - Specify [static TLS certs](https://github.com/coreos/etcd-operator/blob/master/doc/user/cluster_tls.md) as Kubernetes secrets.\\n\\n* **Set Node Selector and Affinity** - [Spread your etcd Pods](https://github.com/coreos/etcd-operator/blob/master/doc/user/spec_examples.md#three-member-cluster-with-node-selector-and-anti-affinity-across-nodes) across Nodes and availability zones.\\n\\n* **Set Resource Limits** - [Set the Kubernetes limit and request](https://github.com/coreos/etcd-operator/blob/master/doc/user/spec_examples.md#three-member-cluster-with-resource-requirement) values for your etcd Pods.\\n\\n* **Customize Storage** - [Set a custom StorageClass](https://github.com/coreos/etcd-operator/blob/master/doc/user/spec_examples.md#custom-persistentvolumeclaim-definition) that you would like to use.\\n\",\"displayName\":\"etcd\",\"icon\":[{\"base64data\":\"iVBORw0KGgoAAAANSUhEUgAAAOEAAADZCAYAAADWmle6AAAACXBIWXMAAAsTAAALEwEAmpwYAAAAGXRFWHRTb2Z0d2FyZQBBZG9iZSBJbWFnZVJlYWR5ccllPAAAEKlJREFUeNrsndt1GzkShmEev4sTgeiHfRYdgVqbgOgITEVgOgLTEQydwIiKwFQCayoCU6+7DyYjsBiBFyVVz7RkXvqCSxXw/+f04XjGQ6IL+FBVuL769euXgZ7r39f/G9iP0X+u/jWDNZzZdGI/Ftama1jjuV4BwmcNpbAf1Fgu+V/9YRvNAyzT2a59+/GT/3hnn5m16wKWedJrmOCxkYztx9Q+py/+E0GJxtJdReWfz+mxNt+QzS2Mc0AI+HbBBwj9QViKbH5t64DsP2fvmGXUkWU4WgO+Uve2YQzBUGd7r+zH2ZG/tiUQc4QxKwgbwFfVGwwmdLL5wH78aPC/ZBem9jJpCAX3xtcNASSNgJLzUPSQyjB1zQNl8IQJ9MIU4lx2+Jo72ysXYKl1HSzN02BMa/vbZ5xyNJIshJzwf3L0dQhJw4Sih/SFw9Tk8sVeghVPoefaIYCkMZCKbrcP9lnZuk0uPUjGE/KE8JQry7W2tgfuC3vXgvNV+qSQbyFtAtyWk7zWiYevvuUQ9QEQCvJ+5mmu6dTjz1zFHLFj8Eb87MtxaZh/IQFIHom+9vgTWwZxAQjT9X4vtbEVPojwjiV471s00mhAckpwGuCn1HtFtRDaSh6y9zsL+LNBvCG/24ThcxHObdlWc1v+VQJe8LcO0jwtuF8BwnAAUgP9M8JPU2Me+Oh12auPGT6fHuTePE3bLDy+x9pTLnhMn+07TQGh//Bz1iI0c6kvtqInjvPZcYR3KsPVmUsPYt9nFig9SCY8VQNhpPBzn952bbgcsk2EvM89wzh3UEffBbyPqvBUBYQ8ODGPFOLsa7RF096WJ69L+E4EmnpjWu5o4ChlKaRTKT39RMMaVPEQRsz/nIWlDN80chjdJlSd1l0pJCAMVZsniobQVuxceMM9OFoaMd9zqZtjMEYYDW38Drb8Y0DYPLShxn0pvIFuOSxd7YCPet9zk452wsh54FJoeN05hcgSQoG5RR0Qh9Q4E4VvL4wcZq8UACgaRFEQKgSwWrkr5WFnGxiHSutqJGlXjBgIOayhwYBTA0ER0oisIVSUV0AAMT0IASCUO4hRIQSAEECMCCEPwqyQA0JCQBzEGjWNAqHiUVAoXUWbvggOIQCEAOJzxTjoaQ4AIaE64/aZridUsBYUgkhB15oGg1DBIl8IqirYwV6hPSGBSFteMCUBSVXwfYixBmamRubeMyjzMJQBDDowE3OesDD+zwqFoDqiEwXoXJpljB+PvWJGy75BKF1FPxhKygJuqUdYQGlLxNEXkrYyjQ0GbaAwEnUIlLRNvVjQDYUAsJB0HKLE4y0AIpQNgCIhBIhQTgCKhZBBpAN/v6LtQI50JfUgYOnnjmLUFHKhjxbAmdTCaTiBm3ovLPqG2urWAij6im0Nd9aTN9ygLUEt9LgSRnohxUPIKxlGaE+/6Y7znFf0yX+GnkvFFWmarkab2o9PmTeq8sbd2a7DaysXz7i64VeznN4jCQhN9gdDbRiuWrfrsq0mHIrlaq+hlotCtd3Um9u0BYWY8y5D67wccJoZjFca7iUs9VqZcfsZwTd1sbWGG+OcYaTnPAP7rTQVVlM4Sg3oGvB1tmNh0t/HKXZ1jFoIMwCQjtqbhNxUmkGYqgZEDZP11HN/S3gAYRozf0l8C5kKEKUvW0t1IfeWG/5MwgheZTT1E0AEhDkAePQO+Ig2H3DncAkQM4cwUQCD530dU4B5Yvmi2LlDqXfWrxMCcMth51RToRMNUXFnfc2KJ0+Ryl0VNOUwlhh6NoxK5gnViTgQpUG4SqSyt5z3zRJpuKmt3Q1614QaCBPaN6je+2XiFcWAKOXcUfIYKRyL/1lb7pe5VxSxxjQ6hImshqGRt5GWZVKO6q2wHwujfwDtIvaIdexj8Cm8+a68EqMfox6x/voMouZF4dHnEGNeCDMwT6vdNfekH1MafMk4PI06YtqLVGl95aEM9Z5vAeCTOA++YLtoVJRrsqNCaJ6WRmkdYaNec5BT/lcTRMqrhmwfjbpkj55+OKp8IEbU/JLgPJE6Wa3TTe9sHS+ShVD5QIyqIxMEwKh12olC6mHIed5ewEop80CNlfIOADYOT2nd6ZXCop+Ebqchc0JqxKcKASxChycJgUh1rnHA5ow9eTrhqNI7JWiAYYwBGGdpyNLoGw0Pkh96h1BpHihyywtATDM/7Hk2fN9EnH8BgKJCU4ooBkbXFMZJiPbrOyecGl3zgQDQL4hk10IZiOe+5w99Q/gBAEIJgPhJM4QAEEoFREAIAAEiIASAkD8Qt4AQAEIA
ERAGFlX4CACKAXGVM4ivMwWwCLFAlyeoaa70QePKm5Dlp+/n+ye/5dYgva6YsUaVeMa+tzNFeJtWwc+udbJ0Fg399kLielQJ5Ze61c2+7ytA6EZetiPxZC6tj22yJCv6jUwOyj/zcbqAxOMyAKEbfeHtNa7DtYXptjsk2kJxR+eIeim/tHNofUKYy8DMrQcAKWz6brpvzyIAlpwPhQ49l6b7skJf5Z+YTOYQc4FwLDxvoTDwaygQK+U/kVr+ytSFBG01Q3gnJJR4cNiAhx4HDub8/b5DULXlj6SVZghFiE+LdvE9vo/o8Lp1RmH5hzm0T6wdbZ6n+D6i44zDRc3ln6CpAEJfXiRU45oqLz8gFAThWsh7ughrRibc0QynHgZpNJa/ENJ+loCwu/qOGnFIjYR/n7TfgycULhcQhu6VC+HfF+L3BoAQ4WiZTw1M+FPCnA2gKC6/FAhXgDC+ojQGh3NuWsvfF1L/D5ohlCKtl1j2ldu9a/nPAKFwN56Bst10zCG0CPleXN/zXPgHQZXaZaBgrbzyY5V/mUA+6F0hwtGN9rwu5DVZPuwWqfxdFz1LWbJ2lwKEa+0Qsm4Dl3fp+Pu0lV97PgwIPfSsS+UQhj5Oo+vvFULazRIQyvGEcxPuNLCth2MvFsrKn8UOilAQShkh7TTczYNMoS6OdP47msrPi82lXKGWhCdMZYS0bFy+vcnGAjP1CIfvgbKNA9glecEH9RD6Ol4wRuWyN/G9MHnksS6o/GPf5XcwNSUlHzQhDuAKtWJmkwKElU7lylP5rgIcsquh/FI8YZCDpkJBuE4FQm7Icw8N+SrUGaQKyi8FwiDt1ve5o+Vu7qYHy/psgK8cvh+FTYuO77bhEC7GuaPiys/L1X4IgXDL+e3M5+ovLxBy5VLuIebw1oqcHoPfoaMJUsHays878r8KbDc3xtPx/84gZPBG/JwaufrsY/SRG/OY3//8QMNdsvdZCFtbW6f8pFuf5bflILAlX7O+4fdfugKyFYS8T2zAsXthdG0VurPGKwI06oF5vkBgHWkNp6ry29+lsPZMU3vijnXFNmoclr+6+Ou/FIb8yb30sS8YGjmTqCLyQsi5N/6ZwKs0Yenj68pfPjF6N782Dp2FzV9CTyoSeY8mLK16qGxIkLI8oa1n8tz9juP40DlK0epxYEbojbq+9QfurBeVIlCO9D2396bxiV4lkYQ3hOAFw2pbhqMGISkkQOMcQ9EqhDmGZZdo92JC0YHRNTfoSg+5e0IT+opqCKHoIU+4ztQIgBD1EFNrQAgIpYSil9lDmPHqkROPt+JC6AgPquSuumJmg0YARVCuneDfvPVeJokZ6pIXDkNxQtGzTF9/BQjRG0tQznfb74RwCQghpALBtIQnfK4zhxdyQvVCUeknMIT3hLyY+T5jo0yABqKPQNpUNw/09tGZod5jgCaYFxyYvJcNPkv9eof+I3pnCFEHIETjSM8L9tHZHYCQT9PaZGycU6yg8S4akDnJ+P03L0+t23XGzCLzRgII/Wqa+fv/xlfvmKvMUOcOrlCDdoei1MGdZm6G5VEIfRzzjd4aQs69n699Rx7ewhvCGzr2gmTPs8zNsJOrXt24FbkhhOjCfT4ICA/rPbyhUy94Dks0gJCX1NzCZui9YUd3oei+c257TalFbgg19ILHrlrL2gvWgXAL26EX76gZTNASQnad8Ibwhl284NhgXpB0c+jKhWO3Ms1hP9ihJYB9eMF6qd1BCPk0qA1s+LimFIu7m4nsdQIzPK4VbQ8hYvrnuSH2G9b2ggP78QmWqBdF9Vx8SSY6QYdUW7BTA1schZATyhvY8lHvcRbNUS9YGFy2U+qmzh2YPVc0I7yAOFyHfRpyUwtCSzOdPXMHmz7qDIM0e0V2wZTEk+6Ym6N63eBLp/b5Bts+2cKCSJ/LuoZO3ANSiE5hKAZjnvNSS4931jcw9jpwT0feV/qSJ1pVtCyfHKDkvK8Ejx7pUxGh2xFNSwx8QTi2H9ceC0/nni64MS/5N5dG39pDqvRV+WgGk71c9VFXF9b+xYvOw/d61iv7m3MvEHryhvecwC52jSSx4VIIgwnMNT/UsTxIgpPt3K/ARj15CptwL3Zd/ceDSATj2DGQjbxgWwhdeMMte7zpy5On9vymRm/YxBYljGVjKWF9VJf7I1+sex3wY8w/V1QPTborW/72gkdsRDaZMJBdbdHIC7aCkAu9atlLbtnrzerMnyToDaGwelOnk3/hHSem/ZK7e/t7jeeR20LYBgqa8J80gS8jbwi5F02Uj1u2NYJxap8PLkJfLxA2hIJyvnHX/AfeEPLpBfe0uSFHbnXaea3Qd5d6HcpYZ8L6M7lnFwMQ3MNg+RxUR1+6AshtbsVgfXTEg1sIGax9UND2p7f270wdG3eK9gXVGHdw2k5sOyZv+Nbs39Z308XR9DqWb2J+PwKDhuKHPobfuXf7gnYGHdCs7bhDDadD4entDug7LWNsnRNW4mYqwJ9dk+GGSTPBiA2j0G8RWNM5upZtcG4/3vMfP7KnbK2egx6CCnDPhRn7NgD3cghLIad5WcM2SO38iqHvvMOosyeMpQ5zlVCaaj06GVs9xUbHdiKoqrHWgquFEFMWUEWfXUxJAML23hAHFOctmjZQffKD2pywkhtSGHKNtpitLroscAeE7kCkSsC60vxEl6yMtL9EL5HKGCMszU5bk8gdkklAyEn5FO0yK419rIxBOIqwFMooDE0tHEVYijAUECIshRCGIhxFWIowFJ5QkEYIS5PTJrUwNGlPyN6QQPyKtpuM1E/K5+YJDV/MiA3AaehzqgAm7QnZG9IGYKo8bHnSK7VblLL3hOwNHziPuEGOqE5brrdR6i+atCfckyeWD47HkAkepRGLY/e8A8J0gCwYSNypF08bBm+e6zVz2UL4AshhBUjML/rXLefqC82bcQFhGC9JDwZ1uuu+At0S5gCETYHsV4DUeD9fDN2Zfy5OXaW2zAwQygCzBLJ8cvaW5OXKC1FxfTggFAHmoAJnSiOw2wps9KwRWgJCLaEswaj5NqkLwAYIU4BxqTSXbHXpJdRMPZgAOiAMqABCNGYIEEJutEK5IUAIwYMDQgiCACEEAcJs1Vda7gGqDhCmoiEghAAhBAHCrKXVo2C1DCBMRlp37uMIEECoX7xrX3P5C9QiINSuIcoPAUI0YkAICLNWgfJDh4T9hH7zqYH9+JHAq7zBqWjwhPAicTVCVQJCNF50JghHocahKK0X/ZnQKyEkhSdUpzG8OgQI42qC94EQjsYLRSmH+pbgq73L6bYkeEJ4DYTYmeg1TOBFc/usTTp3V9DdEuXJ2xDCUbXhaXk0/kAYmBvuMB4qkC35E5e5AMKkwSQgyxufyuPy6fMMgAFCSI73LFXU/N8AmEL9X4ABACNSKMHAgb34AAAAAElFTkSuQmCC\",\"mediatype\":\"image/png\"}],\"install\":{\"spec\":{\"deployments\":[{\"name\":\"etcd-operator\",\"spec\":{\"replicas\":1,\
"selector\":{\"matchLabels\":{\"name\":\"etcd-operator-alm-owned\"}},\"template\":{\"metadata\":{\"labels\":{\"name\":\"etcd-operator-alm-owned\"},\"name\":\"etcd-operator-alm-owned\"},\"spec\":{\"containers\":[{\"command\":[\"etcd-operator\",\"--create-crd=false\"],\"env\":[{\"name\":\"MY_POD_NAMESPACE\",\"valueFrom\":{\"fieldRef\":{\"fieldPath\":\"metadata.namespace\"}}},{\"name\":\"MY_POD_NAME\",\"valueFrom\":{\"fieldRef\":{\"fieldPath\":\"metadata.name\"}}}],\"image\":\"quay.io/coreos/etcd-operator@sha256:66a37fd61a06a43969854ee6d3e21087a98b93838e284a6086b13917f96b0d9b\",\"name\":\"etcd-operator\"},{\"command\":[\"etcd-backup-operator\",\"--create-crd=false\"],\"env\":[{\"name\":\"MY_POD_NAMESPACE\",\"valueFrom\":{\"fieldRef\":{\"fieldPath\":\"metadata.namespace\"}}},{\"name\":\"MY_POD_NAME\",\"valueFrom\":{\"fieldRef\":{\"fieldPath\":\"metadata.name\"}}}],\"image\":\"quay.io/coreos/etcd-operator@sha256:66a37fd61a06a43969854ee6d3e21087a98b93838e284a6086b13917f96b0d9b\",\"name\":\"etcd-backup-operator\"},{\"command\":[\"etcd-restore-operator\",\"--create-crd=false\"],\"env\":[{\"name\":\"MY_POD_NAMESPACE\",\"valueFrom\":{\"fieldRef\":{\"fieldPath\":\"metadata.namespace\"}}},{\"name\":\"MY_POD_NAME\",\"valueFrom\":{\"fieldRef\":{\"fieldPath\":\"metadata.name\"}}}],\"image\":\"quay.io/coreos/etcd-operator@sha256:66a37fd61a06a43969854ee6d3e21087a98b93838e284a6086b13917f96b0d9b\",\"name\":\"etcd-restore-operator\"}],\"serviceAccountName\":\"etcd-operator\"}}}}],\"permissions\":[{\"rules\":[{\"apiGroups\":[\"etcd.database.coreos.com\"],\"resources\":[\"etcdclusters\",\"etcdbackups\",\"etcdrestores\"],\"verbs\":[\"*\"]},{\"apiGroups\":[\"\"],\"resources\":[\"pods\",\"services\",\"endpoints\",\"persistentvolumeclaims\",\"events\"],\"verbs\":[\"*\"]},{\"apiGroups\":[\"apps\"],\"resources\":[\"deployments\"],\"verbs\":[\"*\"]},{\"apiGroups\":[\"\"],\"resources\":[\"secrets\"],\"verbs\":[\"get\"]}],\"serviceAccountName\":\"etcd-operator\"}]},\"strategy\":\"deployment\"},\"installModes\":[{\"supported\":true,\"type\":\"OwnNamespace\"},{\"supported\":true,\"type\":\"SingleNamespace\"},{\"supported\":false,\"type\":\"MultiNamespace\"},{\"supported\":false,\"type\":\"AllNamespaces\"}],\"keywords\":[\"etcd\",\"key value\",\"database\",\"coreos\",\"open source\"],\"labels\":{\"alm-owner-etcd\":\"etcdoperator\",\"operated-by\":\"etcdoperator\"},\"links\":[{\"name\":\"Blog\",\"url\":\"https://coreos.com/etcd\"},{\"name\":\"Documentation\",\"url\":\"https://coreos.com/operators/etcd/docs/latest/\"},{\"name\":\"etcd Operator Source Code\",\"url\":\"https://github.com/coreos/etcd-operator\"}],\"maintainers\":[{\"email\":\"etcd-dev@googlegroups.com\",\"name\":\"etcd Community\"}],\"maturity\":\"alpha\",\"provider\":{\"name\":\"CNCF\"},\"replaces\":\"etcdoperator.v0.9.2\",\"selector\":{\"matchLabels\":{\"alm-owner-etcd\":\"etcdoperator\",\"operated-by\":\"etcdoperator\"}},\"version\":\"0.9.4\"}}" + csvJSON = "{\"apiVersion\":\"operators.coreos.com/v1alpha1\",\"kind\":\"ClusterServiceVersion\",\"metadata\":{\"annotations\":{\"alm-examples\":\"[\\n {\\n \\\"apiVersion\\\": \\\"etcd.database.coreos.com/v1beta2\\\",\\n \\\"kind\\\": \\\"EtcdCluster\\\",\\n \\\"metadata\\\": {\\n \\\"name\\\": \\\"example\\\"\\n },\\n \\\"spec\\\": {\\n \\\"size\\\": 3,\\n \\\"version\\\": \\\"3.2.13\\\"\\n }\\n },\\n {\\n \\\"apiVersion\\\": \\\"etcd.database.coreos.com/v1beta2\\\",\\n \\\"kind\\\": \\\"EtcdRestore\\\",\\n \\\"metadata\\\": {\\n \\\"name\\\": \\\"example-etcd-cluster-restore\\\"\\n },\\n \\\"spec\\\": {\\n 
\\\"etcdCluster\\\": {\\n \\\"name\\\": \\\"example-etcd-cluster\\\"\\n },\\n \\\"backupStorageType\\\": \\\"S3\\\",\\n \\\"s3\\\": {\\n \\\"path\\\": \\\"\\u003cfull-s3-path\\u003e\\\",\\n \\\"awsSecret\\\": \\\"\\u003caws-secret\\u003e\\\"\\n }\\n }\\n },\\n {\\n \\\"apiVersion\\\": \\\"etcd.database.coreos.com/v1beta2\\\",\\n \\\"kind\\\": \\\"EtcdBackup\\\",\\n \\\"metadata\\\": {\\n \\\"name\\\": \\\"example-etcd-cluster-backup\\\"\\n },\\n \\\"spec\\\": {\\n \\\"etcdEndpoints\\\": [\\\"\\u003cetcd-cluster-endpoints\\u003e\\\"],\\n \\\"storageType\\\":\\\"S3\\\",\\n \\\"s3\\\": {\\n \\\"path\\\": \\\"\\u003cfull-s3-path\\u003e\\\",\\n \\\"awsSecret\\\": \\\"\\u003caws-secret\\u003e\\\"\\n }\\n }\\n }\\n]\\n\",\"capabilities\":\"Full Lifecycle\",\"categories\":\"Database\",\"containerImage\":\"quay.io/coreos/etcd-operator@sha256:66a37fd61a06a43969854ee6d3e21087a98b93838e284a6086b13917f96b0d9b\",\"createdAt\":\"2019-02-28 01:03:00\",\"description\":\"Create and maintain highly-available etcd clusters on Kubernetes\",\"repository\":\"https://github.com/coreos/etcd-operator\",\"tectonic-visibility\":\"ocs\"},\"name\":\"etcdoperator.v0.9.4\",\"namespace\":\"placeholder\"},\"spec\":{\"relatedImages\":[{\"name\":\"etcdv0.9.4\",\"image\":\"quay.io/coreos/etcd-operator@sha256:66a37fd61a06a43969854ee6d3e21087a98b93838e284a6086b13917f96b0d9b\"}],\"customresourcedefinitions\":{\"owned\":[{\"description\":\"Represents a cluster of etcd nodes.\",\"displayName\":\"etcd Cluster\",\"kind\":\"EtcdCluster\",\"name\":\"etcdclusters.etcd.database.coreos.com\",\"resources\":[{\"kind\":\"Service\",\"version\":\"v1\"},{\"kind\":\"Pod\",\"version\":\"v1\"}],\"specDescriptors\":[{\"description\":\"The desired number of member Pods for the etcd cluster.\",\"displayName\":\"Size\",\"path\":\"size\",\"x-descriptors\":[\"urn:alm:descriptor:com.tectonic.ui:podCount\"]},{\"description\":\"Limits describes the minimum/maximum amount of compute resources required/allowed\",\"displayName\":\"Resource Requirements\",\"path\":\"pod.resources\",\"x-descriptors\":[\"urn:alm:descriptor:com.tectonic.ui:resourceRequirements\"]}],\"statusDescriptors\":[{\"description\":\"The status of each of the member Pods for the etcd cluster.\",\"displayName\":\"Member Status\",\"path\":\"members\",\"x-descriptors\":[\"urn:alm:descriptor:com.tectonic.ui:podStatuses\"]},{\"description\":\"The service at which the running etcd cluster can be accessed.\",\"displayName\":\"Service\",\"path\":\"serviceName\",\"x-descriptors\":[\"urn:alm:descriptor:io.kubernetes:Service\"]},{\"description\":\"The current size of the etcd cluster.\",\"displayName\":\"Cluster Size\",\"path\":\"size\"},{\"description\":\"The current version of the etcd cluster.\",\"displayName\":\"Current Version\",\"path\":\"currentVersion\"},{\"description\":\"The target version of the etcd cluster, after upgrading.\",\"displayName\":\"Target Version\",\"path\":\"targetVersion\"},{\"description\":\"The current status of the etcd cluster.\",\"displayName\":\"Status\",\"path\":\"phase\",\"x-descriptors\":[\"urn:alm:descriptor:io.kubernetes.phase\"]},{\"description\":\"Explanation for the current status of the cluster.\",\"displayName\":\"Status Details\",\"path\":\"reason\",\"x-descriptors\":[\"urn:alm:descriptor:io.kubernetes.phase:reason\"]}],\"version\":\"v1beta2\"},{\"description\":\"Represents the intent to backup an etcd cluster.\",\"displayName\":\"etcd 
Backup\",\"kind\":\"EtcdBackup\",\"name\":\"etcdbackups.etcd.database.coreos.com\",\"specDescriptors\":[{\"description\":\"Specifies the endpoints of an etcd cluster.\",\"displayName\":\"etcd Endpoint(s)\",\"path\":\"etcdEndpoints\",\"x-descriptors\":[\"urn:alm:descriptor:etcd:endpoint\"]},{\"description\":\"The full AWS S3 path where the backup is saved.\",\"displayName\":\"S3 Path\",\"path\":\"s3.path\",\"x-descriptors\":[\"urn:alm:descriptor:aws:s3:path\"]},{\"description\":\"The name of the secret object that stores the AWS credential and config files.\",\"displayName\":\"AWS Secret\",\"path\":\"s3.awsSecret\",\"x-descriptors\":[\"urn:alm:descriptor:io.kubernetes:Secret\"]}],\"statusDescriptors\":[{\"description\":\"Indicates if the backup was successful.\",\"displayName\":\"Succeeded\",\"path\":\"succeeded\",\"x-descriptors\":[\"urn:alm:descriptor:text\"]},{\"description\":\"Indicates the reason for any backup related failures.\",\"displayName\":\"Reason\",\"path\":\"reason\",\"x-descriptors\":[\"urn:alm:descriptor:io.kubernetes.phase:reason\"]}],\"version\":\"v1beta2\"},{\"description\":\"Represents the intent to restore an etcd cluster from a backup.\",\"displayName\":\"etcd Restore\",\"kind\":\"EtcdRestore\",\"name\":\"etcdrestores.etcd.database.coreos.com\",\"specDescriptors\":[{\"description\":\"References the EtcdCluster which should be restored,\",\"displayName\":\"etcd Cluster\",\"path\":\"etcdCluster.name\",\"x-descriptors\":[\"urn:alm:descriptor:io.kubernetes:EtcdCluster\",\"urn:alm:descriptor:text\"]},{\"description\":\"The full AWS S3 path where the backup is saved.\",\"displayName\":\"S3 Path\",\"path\":\"s3.path\",\"x-descriptors\":[\"urn:alm:descriptor:aws:s3:path\"]},{\"description\":\"The name of the secret object that stores the AWS credential and config files.\",\"displayName\":\"AWS Secret\",\"path\":\"s3.awsSecret\",\"x-descriptors\":[\"urn:alm:descriptor:io.kubernetes:Secret\"]}],\"statusDescriptors\":[{\"description\":\"Indicates if the restore was successful.\",\"displayName\":\"Succeeded\",\"path\":\"succeeded\",\"x-descriptors\":[\"urn:alm:descriptor:text\"]},{\"description\":\"Indicates the reason for any restore related failures.\",\"displayName\":\"Reason\",\"path\":\"reason\",\"x-descriptors\":[\"urn:alm:descriptor:io.kubernetes.phase:reason\"]}],\"version\":\"v1beta2\"}]},\"description\":\"The etcd Operater creates and maintains highly-available etcd clusters on Kubernetes, allowing engineers to easily deploy and manage etcd clusters for their applications.\\n\\netcd is a distributed key value store that provides a reliable way to store data across a cluster of machines. It’s open-source and available on GitHub. etcd gracefully handles leader elections during network partitions and will tolerate machine failure, including the leader.\\n\\n\\n### Reading and writing to etcd\\n\\nCommunicate with etcd though its command line utility `etcdctl` via port forwarding:\\n\\n $ kubectl --namespace default port-forward service/example-client 2379:2379\\n $ etcdctl --endpoints http://127.0.0.1:2379 get /\\n\\nOr directly to the API using the Kubernetes Service:\\n\\n $ etcdctl --endpoints http://example-client.default.svc:2379 get /\\n\\nBe sure to secure your etcd cluster (see Common Configurations) before exposing it outside of the namespace or cluster.\\n\\n\\n### Supported Features\\n\\n* **High availability** - Multiple instances of etcd are networked together and secured. 
Individual failures or networking issues are transparently handled to keep your cluster up and running.\\n\\n* **Automated updates** - Rolling out a new etcd version works like all Kubernetes rolling updates. Simply declare the desired version, and the etcd service starts a safe rolling update to the new version automatically.\\n\\n* **Backups included** - Create etcd backups and restore them through the etcd Operator.\\n\\n### Common Configurations\\n\\n* **Configure TLS** - Specify [static TLS certs](https://github.com/coreos/etcd-operator/blob/master/doc/user/cluster_tls.md) as Kubernetes secrets.\\n\\n* **Set Node Selector and Affinity** - [Spread your etcd Pods](https://github.com/coreos/etcd-operator/blob/master/doc/user/spec_examples.md#three-member-cluster-with-node-selector-and-anti-affinity-across-nodes) across Nodes and availability zones.\\n\\n* **Set Resource Limits** - [Set the Kubernetes limit and request](https://github.com/coreos/etcd-operator/blob/master/doc/user/spec_examples.md#three-member-cluster-with-resource-requirement) values for your etcd Pods.\\n\\n* **Customize Storage** - [Set a custom StorageClass](https://github.com/coreos/etcd-operator/blob/master/doc/user/spec_examples.md#custom-persistentvolumeclaim-definition) that you would like to use.\\n\",\"displayName\":\"etcd\",\"icon\":[{\"base64data\":\"iVBORw0KGgoAAAANSUhEUgAAAOEAAADZCAYAAADWmle6AAAACXBIWXMAAAsTAAALEwEAmpwYAAAAGXRFWHRTb2Z0d2FyZQBBZG9iZSBJbWFnZVJlYWR5ccllPAAAEKlJREFUeNrsndt1GzkShmEev4sTgeiHfRYdgVqbgOgITEVgOgLTEQydwIiKwFQCayoCU6+7DyYjsBiBFyVVz7RkXvqCSxXw/+f04XjGQ6IL+FBVuL769euXgZ7r39f/G9iP0X+u/jWDNZzZdGI/Ftama1jjuV4BwmcNpbAf1Fgu+V/9YRvNAyzT2a59+/GT/3hnn5m16wKWedJrmOCxkYztx9Q+py/+E0GJxtJdReWfz+mxNt+QzS2Mc0AI+HbBBwj9QViKbH5t64DsP2fvmGXUkWU4WgO+Uve2YQzBUGd7r+zH2ZG/tiUQc4QxKwgbwFfVGwwmdLL5wH78aPC/ZBem9jJpCAX3xtcNASSNgJLzUPSQyjB1zQNl8IQJ9MIU4lx2+Jo72ysXYKl1HSzN02BMa/vbZ5xyNJIshJzwf3L0dQhJw4Sih/SFw9Tk8sVeghVPoefaIYCkMZCKbrcP9lnZuk0uPUjGE/KE8JQry7W2tgfuC3vXgvNV+qSQbyFtAtyWk7zWiYevvuUQ9QEQCvJ+5mmu6dTjz1zFHLFj8Eb87MtxaZh/IQFIHom+9vgTWwZxAQjT9X4vtbEVPojwjiV471s00mhAckpwGuCn1HtFtRDaSh6y9zsL+LNBvCG/24ThcxHObdlWc1v+VQJe8LcO0jwtuF8BwnAAUgP9M8JPU2Me+Oh12auPGT6fHuTePE3bLDy+x9pTLnhMn+07TQGh//Bz1iI0c6kvtqInjvPZcYR3KsPVmUsPYt9nFig9SCY8VQNhpPBzn952bbgcsk2EvM89wzh3UEffBbyPqvBUBYQ8ODGPFOLsa7RF096WJ69L+E4EmnpjWu5o4ChlKaRTKT39RMMaVPEQRsz/nIWlDN80chjdJlSd1l0pJCAMVZsniobQVuxceMM9OFoaMd9zqZtjMEYYDW38Drb8Y0DYPLShxn0pvIFuOSxd7YCPet9zk452wsh54FJoeN05hcgSQoG5RR0Qh9Q4E4VvL4wcZq8UACgaRFEQKgSwWrkr5WFnGxiHSutqJGlXjBgIOayhwYBTA0ER0oisIVSUV0AAMT0IASCUO4hRIQSAEECMCCEPwqyQA0JCQBzEGjWNAqHiUVAoXUWbvggOIQCEAOJzxTjoaQ4AIaE64/aZridUsBYUgkhB15oGg1DBIl8IqirYwV6hPSGBSFteMCUBSVXwfYixBmamRubeMyjzMJQBDDowE3OesDD+zwqFoDqiEwXoXJpljB+PvWJGy75BKF1FPxhKygJuqUdYQGlLxNEXkrYyjQ0GbaAwEnUIlLRNvVjQDYUAsJB0HKLE4y0AIpQNgCIhBIhQTgCKhZBBpAN/v6LtQI50JfUgYOnnjmLUFHKhjxbAmdTCaTiBm3ovLPqG2urWAij6im0Nd9aTN9ygLUEt9LgSRnohxUPIKxlGaE+/6Y7znFf0yX+GnkvFFWmarkab2o9PmTeq8sbd2a7DaysXz7i64VeznN4jCQhN9gdDbRiuWrfrsq0mHIrlaq+hlotCtd3Um9u0BYWY8y5D67wccJoZjFca7iUs9VqZcfsZwTd1sbWGG+OcYaTnPAP7rTQVVlM4Sg3oGvB1tmNh0t/HKXZ1jFoIMwCQjtqbhNxUmkGYqgZEDZP11HN/S3gAYRozf0l8C5kKEKUvW0t1IfeWG/5MwgheZTT1E0AEhDkAePQO+Ig2H3DncAkQM4cwUQCD530dU4B5Yvmi2LlDqXfWrxMCcMth51RToRMNUXFnfc2KJ0+Ryl0VNOUwlhh6NoxK5gnViTgQpUG4SqSyt5z3zRJpuKmt3Q1614QaCBPaN6je+2XiFcWAKOXcUfIYKRyL/1lb7pe5VxSxxjQ6hImshqGRt5GWZVKO6q2wHwujfwDtIvaIdexj8Cm8+a68EqMfox6x/voMouZF4dHnEGNeCDMwT6vdNfekH1MafMk4PI06YtqLVGl95aEM9Z5vAeCTOA++YLtoVJRrsqNCaJ6WRmkdYaNec5BT/lcTRMqrhmwfjbpkj55+OKp8IEbU/JLgPJE6Wa3TTe9sHS+ShVD5QIyqIxMEwKh12olC6mHIed5ewEo
p80CNlfIOADYOT2nd6ZXCop+Ebqchc0JqxKcKASxChycJgUh1rnHA5ow9eTrhqNI7JWiAYYwBGGdpyNLoGw0Pkh96h1BpHihyywtATDM/7Hk2fN9EnH8BgKJCU4ooBkbXFMZJiPbrOyecGl3zgQDQL4hk10IZiOe+5w99Q/gBAEIJgPhJM4QAEEoFREAIAAEiIASAkD8Qt4AQAEIAERAGFlX4CACKAXGVM4ivMwWwCLFAlyeoaa70QePKm5Dlp+/n+ye/5dYgva6YsUaVeMa+tzNFeJtWwc+udbJ0Fg399kLielQJ5Ze61c2+7ytA6EZetiPxZC6tj22yJCv6jUwOyj/zcbqAxOMyAKEbfeHtNa7DtYXptjsk2kJxR+eIeim/tHNofUKYy8DMrQcAKWz6brpvzyIAlpwPhQ49l6b7skJf5Z+YTOYQc4FwLDxvoTDwaygQK+U/kVr+ytSFBG01Q3gnJJR4cNiAhx4HDub8/b5DULXlj6SVZghFiE+LdvE9vo/o8Lp1RmH5hzm0T6wdbZ6n+D6i44zDRc3ln6CpAEJfXiRU45oqLz8gFAThWsh7ughrRibc0QynHgZpNJa/ENJ+loCwu/qOGnFIjYR/n7TfgycULhcQhu6VC+HfF+L3BoAQ4WiZTw1M+FPCnA2gKC6/FAhXgDC+ojQGh3NuWsvfF1L/D5ohlCKtl1j2ldu9a/nPAKFwN56Bst10zCG0CPleXN/zXPgHQZXaZaBgrbzyY5V/mUA+6F0hwtGN9rwu5DVZPuwWqfxdFz1LWbJ2lwKEa+0Qsm4Dl3fp+Pu0lV97PgwIPfSsS+UQhj5Oo+vvFULazRIQyvGEcxPuNLCth2MvFsrKn8UOilAQShkh7TTczYNMoS6OdP47msrPi82lXKGWhCdMZYS0bFy+vcnGAjP1CIfvgbKNA9glecEH9RD6Ol4wRuWyN/G9MHnksS6o/GPf5XcwNSUlHzQhDuAKtWJmkwKElU7lylP5rgIcsquh/FI8YZCDpkJBuE4FQm7Icw8N+SrUGaQKyi8FwiDt1ve5o+Vu7qYHy/psgK8cvh+FTYuO77bhEC7GuaPiys/L1X4IgXDL+e3M5+ovLxBy5VLuIebw1oqcHoPfoaMJUsHays878r8KbDc3xtPx/84gZPBG/JwaufrsY/SRG/OY3//8QMNdsvdZCFtbW6f8pFuf5bflILAlX7O+4fdfugKyFYS8T2zAsXthdG0VurPGKwI06oF5vkBgHWkNp6ry29+lsPZMU3vijnXFNmoclr+6+Ou/FIb8yb30sS8YGjmTqCLyQsi5N/6ZwKs0Yenj68pfPjF6N782Dp2FzV9CTyoSeY8mLK16qGxIkLI8oa1n8tz9juP40DlK0epxYEbojbq+9QfurBeVIlCO9D2396bxiV4lkYQ3hOAFw2pbhqMGISkkQOMcQ9EqhDmGZZdo92JC0YHRNTfoSg+5e0IT+opqCKHoIU+4ztQIgBD1EFNrQAgIpYSil9lDmPHqkROPt+JC6AgPquSuumJmg0YARVCuneDfvPVeJokZ6pIXDkNxQtGzTF9/BQjRG0tQznfb74RwCQghpALBtIQnfK4zhxdyQvVCUeknMIT3hLyY+T5jo0yABqKPQNpUNw/09tGZod5jgCaYFxyYvJcNPkv9eof+I3pnCFEHIETjSM8L9tHZHYCQT9PaZGycU6yg8S4akDnJ+P03L0+t23XGzCLzRgII/Wqa+fv/xlfvmKvMUOcOrlCDdoei1MGdZm6G5VEIfRzzjd4aQs69n699Rx7ewhvCGzr2gmTPs8zNsJOrXt24FbkhhOjCfT4ICA/rPbyhUy94Dks0gJCX1NzCZui9YUd3oei+c257TalFbgg19ILHrlrL2gvWgXAL26EX76gZTNASQnad8Ibwhl284NhgXpB0c+jKhWO3Ms1hP9ihJYB9eMF6qd1BCPk0qA1s+LimFIu7m4nsdQIzPK4VbQ8hYvrnuSH2G9b2ggP78QmWqBdF9Vx8SSY6QYdUW7BTA1schZATyhvY8lHvcRbNUS9YGFy2U+qmzh2YPVc0I7yAOFyHfRpyUwtCSzOdPXMHmz7qDIM0e0V2wZTEk+6Ym6N63eBLp/b5Bts+2cKCSJ/LuoZO3ANSiE5hKAZjnvNSS4931jcw9jpwT0feV/qSJ1pVtCyfHKDkvK8Ejx7pUxGh2xFNSwx8QTi2H9ceC0/nni64MS/5N5dG39pDqvRV+WgGk71c9VFXF9b+xYvOw/d61iv7m3MvEHryhvecwC52jSSx4VIIgwnMNT/UsTxIgpPt3K/ARj15CptwL3Zd/ceDSATj2DGQjbxgWwhdeMMte7zpy5On9vymRm/YxBYljGVjKWF9VJf7I1+sex3wY8w/V1QPTborW/72gkdsRDaZMJBdbdHIC7aCkAu9atlLbtnrzerMnyToDaGwelOnk3/hHSem/ZK7e/t7jeeR20LYBgqa8J80gS8jbwi5F02Uj1u2NYJxap8PLkJfLxA2hIJyvnHX/AfeEPLpBfe0uSFHbnXaea3Qd5d6HcpYZ8L6M7lnFwMQ3MNg+RxUR1+6AshtbsVgfXTEg1sIGax9UND2p7f270wdG3eK9gXVGHdw2k5sOyZv+Nbs39Z308XR9DqWb2J+PwKDhuKHPobfuXf7gnYGHdCs7bhDDadD4entDug7LWNsnRNW4mYqwJ9dk+GGSTPBiA2j0G8RWNM5upZtcG4/3vMfP7KnbK2egx6CCnDPhRn7NgD3cghLIad5WcM2SO38iqHvvMOosyeMpQ5zlVCaaj06GVs9xUbHdiKoqrHWgquFEFMWUEWfXUxJAML23hAHFOctmjZQffKD2pywkhtSGHKNtpitLroscAeE7kCkSsC60vxEl6yMtL9EL5HKGCMszU5bk8gdkklAyEn5FO0yK419rIxBOIqwFMooDE0tHEVYijAUECIshRCGIhxFWIowFJ5QkEYIS5PTJrUwNGlPyN6QQPyKtpuM1E/K5+YJDV/MiA3AaehzqgAm7QnZG9IGYKo8bHnSK7VblLL3hOwNHziPuEGOqE5brrdR6i+atCfckyeWD47HkAkepRGLY/e8A8J0gCwYSNypF08bBm+e6zVz2UL4AshhBUjML/rXLefqC82bcQFhGC9JDwZ1uuu+At0S5gCETYHsV4DUeD9fDN2Zfy5OXaW2zAwQygCzBLJ8cvaW5OXKC1FxfTggFAHmoAJnSiOw2wps9KwRWgJCLaEswaj5NqkLwAYIU4BxqTSXbHXpJdRMPZgAOiAMqABCNGYIEEJutEK5IUAIwYMDQgiCACEEAcJs1Vda7gGqDhCmoiEghAAhBAHCrKXVo2C1DCBMRlp37uMIEECoX7xrX3P5C9QiINSuIcoPAUI0YkAICLNWgfJDh4T9hH7zqYH9+JHAq7zBqWjwhPAicTVCVQJCNF50JghHocahKK0X/ZnQKyEkhSdUpzG8OgQI42qC94EQjsYLRSmH+pbgq73L6bYkeEJ4DYTYmeg1TOBFc/usTTp3V9DdEuXJ2xDCUbXhaXk0/kAYmBvuMB
4qkC35E5e5AMKkwSQgyxufyuPy6fMMgAFCSI73LFXU/N8AmEL9X4ABACNSKMHAgb34AAAAAElFTkSuQmCC\",\"mediatype\":\"image/png\"}],\"install\":{\"spec\":{\"deployments\":[{\"name\":\"etcd-operator\",\"spec\":{\"replicas\":1,\"selector\":{\"matchLabels\":{\"name\":\"etcd-operator-alm-owned\"}},\"template\":{\"metadata\":{\"labels\":{\"name\":\"etcd-operator-alm-owned\"},\"name\":\"etcd-operator-alm-owned\"},\"spec\":{\"containers\":[{\"command\":[\"etcd-operator\",\"--create-crd=false\"],\"env\":[{\"name\":\"MY_POD_NAMESPACE\",\"valueFrom\":{\"fieldRef\":{\"fieldPath\":\"metadata.namespace\"}}},{\"name\":\"MY_POD_NAME\",\"valueFrom\":{\"fieldRef\":{\"fieldPath\":\"metadata.name\"}}}],\"image\":\"quay.io/coreos/etcd-operator@sha256:66a37fd61a06a43969854ee6d3e21087a98b93838e284a6086b13917f96b0d9b\",\"name\":\"etcd-operator\"},{\"command\":[\"etcd-backup-operator\",\"--create-crd=false\"],\"env\":[{\"name\":\"MY_POD_NAMESPACE\",\"valueFrom\":{\"fieldRef\":{\"fieldPath\":\"metadata.namespace\"}}},{\"name\":\"MY_POD_NAME\",\"valueFrom\":{\"fieldRef\":{\"fieldPath\":\"metadata.name\"}}}],\"image\":\"quay.io/coreos/etcd-operator@sha256:66a37fd61a06a43969854ee6d3e21087a98b93838e284a6086b13917f96b0d9b\",\"name\":\"etcd-backup-operator\"},{\"command\":[\"etcd-restore-operator\",\"--create-crd=false\"],\"env\":[{\"name\":\"MY_POD_NAMESPACE\",\"valueFrom\":{\"fieldRef\":{\"fieldPath\":\"metadata.namespace\"}}},{\"name\":\"MY_POD_NAME\",\"valueFrom\":{\"fieldRef\":{\"fieldPath\":\"metadata.name\"}}}],\"image\":\"quay.io/coreos/etcd-operator@sha256:66a37fd61a06a43969854ee6d3e21087a98b93838e284a6086b13917f96b0d9b\",\"name\":\"etcd-restore-operator\"}],\"serviceAccountName\":\"etcd-operator\"}}}}],\"permissions\":[{\"rules\":[{\"apiGroups\":[\"etcd.database.coreos.com\"],\"resources\":[\"etcdclusters\",\"etcdbackups\",\"etcdrestores\"],\"verbs\":[\"*\"]},{\"apiGroups\":[\"\"],\"resources\":[\"pods\",\"services\",\"endpoints\",\"persistentvolumeclaims\",\"events\"],\"verbs\":[\"*\"]},{\"apiGroups\":[\"apps\"],\"resources\":[\"deployments\"],\"verbs\":[\"*\"]},{\"apiGroups\":[\"\"],\"resources\":[\"secrets\"],\"verbs\":[\"get\"]}],\"serviceAccountName\":\"etcd-operator\"}]},\"strategy\":\"deployment\"},\"installModes\":[{\"supported\":true,\"type\":\"OwnNamespace\"},{\"supported\":true,\"type\":\"SingleNamespace\"},{\"supported\":false,\"type\":\"MultiNamespace\"},{\"supported\":false,\"type\":\"AllNamespaces\"}],\"keywords\":[\"etcd\",\"key value\",\"database\",\"coreos\",\"open source\"],\"labels\":{\"alm-owner-etcd\":\"etcdoperator\",\"operated-by\":\"etcdoperator\"},\"links\":[{\"name\":\"Blog\",\"url\":\"https://coreos.com/etcd\"},{\"name\":\"Documentation\",\"url\":\"https://coreos.com/operators/etcd/docs/latest/\"},{\"name\":\"etcd Operator Source Code\",\"url\":\"https://github.com/coreos/etcd-operator\"}],\"maintainers\":[{\"email\":\"etcd-dev@googlegroups.com\",\"name\":\"etcd Community\"}],\"maturity\":\"alpha\",\"provider\":{\"name\":\"CNCF\"},\"replaces\":\"etcdoperator.v0.9.2\",\"selector\":{\"matchLabels\":{\"alm-owner-etcd\":\"etcdoperator\",\"operated-by\":\"etcdoperator\"}},\"version\":\"0.9.4\"}}" crdbackups = `{"apiVersion":"apiextensions.k8s.io/v1beta1","kind":"CustomResourceDefinition","metadata":{"name":"etcdbackups.etcd.database.coreos.com"},"spec":{"group":"etcd.database.coreos.com","names":{"kind":"EtcdBackup","listKind":"EtcdBackupList","plural":"etcdbackups","singular":"etcdbackup"},"scope":"Namespaced","version":"v1beta2"}}` crdclusters = 
`{"apiVersion":"apiextensions.k8s.io/v1beta1","kind":"CustomResourceDefinition","metadata":{"name":"etcdclusters.etcd.database.coreos.com"},"spec":{"group":"etcd.database.coreos.com","names":{"kind":"EtcdCluster","listKind":"EtcdClusterList","plural":"etcdclusters","shortNames":["etcdclus","etcd"],"singular":"etcdcluster"},"scope":"Namespaced","version":"v1beta2"}}` crdrestores = `{"apiVersion":"apiextensions.k8s.io/v1beta1","kind":"CustomResourceDefinition","metadata":{"name":"etcdrestores.etcd.database.coreos.com"},"spec":{"group":"etcd.database.coreos.com","names":{"kind":"EtcdRestore","listKind":"EtcdRestoreList","plural":"etcdrestores","singular":"etcdrestore"},"scope":"Namespaced","version":"v1beta2"}}` @@ -48,7 +48,7 @@ const ( func testModelBundle(t *testing.T) model.Bundle { t.Helper() var csv v1alpha1.ClusterServiceVersion - if err := json.Unmarshal([]byte(csvJson), &csv); err != nil { + if err := json.Unmarshal([]byte(csvJSON), &csv); err != nil { t.Fatalf("failed to unmarshal csv json: %v", err) } b := model.Bundle{ @@ -63,14 +63,14 @@ func testModelBundle(t *testing.T) model.Bundle { property.MustBuildGVK("etcd.database.coreos.com", "v1beta2", "EtcdBackup"), property.MustBuildBundleObject([]byte(crdbackups)), property.MustBuildBundleObject([]byte(crdclusters)), - property.MustBuildBundleObject([]byte(csvJson)), + property.MustBuildBundleObject([]byte(csvJSON)), property.MustBuildBundleObject([]byte(crdrestores)), }, - CsvJSON: csvJson, + CsvJSON: csvJSON, Objects: []string{ crdbackups, crdclusters, - csvJson, + csvJSON, crdrestores, }, RelatedImages: []model.RelatedImage{ @@ -106,11 +106,11 @@ func testAPIBundle() Bundle { {Type: "olm.gvk", Value: `{"group":"etcd.database.coreos.com","kind":"EtcdBackup","version":"v1beta2"}`}, }, Replaces: "etcdoperator.v0.9.2", - CsvJson: csvJson, + CsvJson: csvJSON, Object: []string{ crdbackups, crdclusters, - csvJson, + csvJSON, crdrestores}, } } diff --git a/pkg/cache/cache_test.go b/pkg/cache/cache_test.go index 47a156fed..dacf75ea4 100644 --- a/pkg/cache/cache_test.go +++ b/pkg/cache/cache_test.go @@ -17,9 +17,9 @@ func TestCache_GetBundle(t *testing.T) { t.Run(name, func(t *testing.T) { b, err := testQuerier.GetBundle(context.TODO(), "etcd", "singlenamespace-alpha", "etcdoperator.v0.9.4") require.NoError(t, err) - require.Equal(t, b.PackageName, "etcd") - require.Equal(t, b.ChannelName, "singlenamespace-alpha") - require.Equal(t, b.CsvName, "etcdoperator.v0.9.4") + require.Equal(t, "etcd", b.PackageName) + require.Equal(t, "singlenamespace-alpha", b.ChannelName) + require.Equal(t, "etcdoperator.v0.9.4", b.CsvName) }) } } @@ -31,9 +31,9 @@ func TestCache_GetBundleForChannel(t *testing.T) { require.NoError(t, err) require.NotNil(t, b) - require.Equal(t, b.PackageName, "etcd") - require.Equal(t, b.ChannelName, "singlenamespace-alpha") - require.Equal(t, b.CsvName, "etcdoperator.v0.9.4") + require.Equal(t, "etcd", b.PackageName) + require.Equal(t, "singlenamespace-alpha", b.ChannelName) + require.Equal(t, "etcdoperator.v0.9.4", b.CsvName) }) } } @@ -44,9 +44,9 @@ func TestCache_GetBundleThatProvides(t *testing.T) { b, err := testQuerier.GetBundleThatProvides(context.TODO(), "etcd.database.coreos.com", "v1beta2", "EtcdBackup") require.NoError(t, err) require.NotNil(t, b) - require.Equal(t, b.PackageName, "etcd") - require.Equal(t, b.ChannelName, "singlenamespace-alpha") - require.Equal(t, b.CsvName, "etcdoperator.v0.9.4") + require.Equal(t, "etcd", b.PackageName) + require.Equal(t, "singlenamespace-alpha", b.ChannelName) + require.Equal(t, 
"etcdoperator.v0.9.4", b.CsvName) }) } } @@ -57,9 +57,9 @@ func TestCache_GetBundleThatReplaces(t *testing.T) { b, err := testQuerier.GetBundleThatReplaces(context.TODO(), "etcdoperator.v0.9.0", "etcd", "singlenamespace-alpha") require.NoError(t, err) require.NotNil(t, b) - require.Equal(t, b.PackageName, "etcd") - require.Equal(t, b.ChannelName, "singlenamespace-alpha") - require.Equal(t, b.CsvName, "etcdoperator.v0.9.2") + require.Equal(t, "etcd", b.PackageName) + require.Equal(t, "singlenamespace-alpha", b.ChannelName) + require.Equal(t, "etcdoperator.v0.9.2", b.CsvName) }) } } @@ -214,7 +214,7 @@ func TestCache_ListPackages(t *testing.T) { packages, err := testQuerier.ListPackages(context.TODO()) require.NoError(t, err) require.NotNil(t, packages) - require.Equal(t, 2, len(packages)) + require.Len(t, packages, 2) }) } } diff --git a/pkg/cache/tar_test.go b/pkg/cache/tar_test.go index d95321f93..02b1f3d8a 100644 --- a/pkg/cache/tar_test.go +++ b/pkg/cache/tar_test.go @@ -2,7 +2,6 @@ package cache import ( "bytes" - "errors" "fmt" "hash/fnv" "io/fs" @@ -26,7 +25,7 @@ func Test_fsToTar(t *testing.T) { return notExist }, expect: func(t *testing.T, bytes []byte, err error) { - require.True(t, errors.Is(err, fs.ErrNotExist)) + require.ErrorIs(t, err, fs.ErrNotExist) }, }, { diff --git a/pkg/configmap/configmap_test.go b/pkg/configmap/configmap_test.go index 433a7661d..05e54bd93 100644 --- a/pkg/configmap/configmap_test.go +++ b/pkg/configmap/configmap_test.go @@ -37,7 +37,7 @@ func TestLoad(t *testing.T) { crdListGot := bundleGot.GetObject() // 1 CSV + 1 CRD = 2 objects - assert.Equal(t, 2, len(crdListGot)) + assert.Len(t, crdListGot, 2) }, }, { @@ -46,11 +46,11 @@ func TestLoad(t *testing.T) { assertFunc: func(t *testing.T, bundleGot *api.Bundle) { objects := bundleGot.GetObject() assert.NotNil(t, objects) - assert.Equal(t, 1, len(objects)) + assert.Len(t, objects, 1) unst, err := unstructuredlib.FromString(objects[0]) - assert.NoError(t, err) - assert.True(t, unst.GetKind() == "Foo") + require.NoError(t, err) + assert.Equal(t, "Foo", unst.GetKind()) }, }, { @@ -61,7 +61,7 @@ func TestLoad(t *testing.T) { assert.NotNil(t, csvGot) unst, err := unstructuredlib.FromString(csvGot) - assert.NoError(t, err) + require.NoError(t, err) assert.True(t, unst.GetName() == "first" || unst.GetName() == "second") }, }, @@ -80,12 +80,12 @@ func TestLoad(t *testing.T) { csvGot := bundleGot.GetCsvJson() assert.NotNil(t, csvGot) unst, err := unstructuredlib.FromString(csvGot) - assert.NoError(t, err) - assert.True(t, unst.GetName() == "kiali-operator.v1.4.2") + require.NoError(t, err) + assert.Equal(t, "kiali-operator.v1.4.2", unst.GetName()) objects := bundleGot.GetObject() // 2 CRDs + 1 CSV == 3 objects - assert.Equal(t, 3, len(objects)) + assert.Len(t, objects, 3) }, }, { @@ -95,11 +95,11 @@ func TestLoad(t *testing.T) { csvGot := bundleGot.GetCsvJson() assert.NotNil(t, csvGot) unst, err := unstructuredlib.FromString(csvGot) - assert.NoError(t, err) - assert.True(t, unst.GetName() == "kiali-operator.v1.4.2") + require.NoError(t, err) + assert.Equal(t, "kiali-operator.v1.4.2", unst.GetName()) objects := bundleGot.GetObject() - assert.Equal(t, 3, len(objects)) + assert.Len(t, objects, 3) }, }, } @@ -111,7 +111,7 @@ func TestLoad(t *testing.T) { loader := NewBundleLoader() bundleGot, errGot := loader.Load(cm) - assert.NoError(t, errGot) + require.NoError(t, errGot) assert.NotNil(t, bundleGot) if tt.assertFunc != nil { @@ -148,23 +148,24 @@ func TestLoadWriteRead(t *testing.T) { }, } clientset := 
fake.NewSimpleClientset() - clientset.CoreV1().ConfigMaps(configMapNamespace).Create(context.TODO(), cm, metav1.CreateOptions{}) + _, _ = clientset.CoreV1().ConfigMaps(configMapNamespace).Create(context.TODO(), cm, metav1.CreateOptions{}) cmLoader := NewConfigMapLoaderWithClient(configMapName, configMapNamespace, tt.source, tt.gzip, clientset) err := cmLoader.Populate(1 << 20) - assert.NoError(t, err) + require.NoError(t, err) cm, err = clientset.CoreV1().ConfigMaps(configMapNamespace).Get(context.TODO(), configMapName, metav1.GetOptions{}) - assert.NoError(t, err) + require.NoError(t, err) bundleLoader := NewBundleLoader() bundle, err := bundleLoader.Load(cm) + require.NoError(t, err) expectedObjects, err := unstructuredlib.FromDir(tt.source + "manifests/") - assert.NoError(t, err) + require.NoError(t, err) bundleObjects, err := unstructuredlib.FromBundle(bundle) - assert.NoError(t, err) + require.NoError(t, err) assert.ElementsMatch(t, expectedObjects, bundleObjects) }) diff --git a/pkg/containertools/dockerfilegenerator_test.go b/pkg/containertools/dockerfilegenerator_test.go index 9952812bc..41810bc2e 100644 --- a/pkg/containertools/dockerfilegenerator_test.go +++ b/pkg/containertools/dockerfilegenerator_test.go @@ -31,7 +31,7 @@ CMD ["registry", "serve", "--database", "/database/index.db"] } dockerfile := dockerfileGenerator.GenerateIndexDockerfile(binarySourceImage, databasePath) - require.Equal(t, dockerfile, expectedDockerfile) + require.Equal(t, expectedDockerfile, dockerfile) } func TestGenerateDockerfile_EmptyBaseImage(t *testing.T) { @@ -54,5 +54,5 @@ CMD ["registry", "serve", "--database", "/database/index.db"] } dockerfile := dockerfileGenerator.GenerateIndexDockerfile("", databasePath) - require.Equal(t, dockerfile, expectedDockerfile) + require.Equal(t, expectedDockerfile, dockerfile) } diff --git a/pkg/containertools/labelreader_test.go b/pkg/containertools/labelreader_test.go index f685765f1..334dbdf31 100644 --- a/pkg/containertools/labelreader_test.go +++ b/pkg/containertools/labelreader_test.go @@ -32,7 +32,7 @@ func TestReadDockerLabels(t *testing.T) { labels, err := labelReader.GetLabelsFromImage(image) require.NoError(t, err) - require.Equal(t, labels[expectedLabelKey], expectedLabelVal) + require.Equal(t, expectedLabelVal, labels[expectedLabelKey]) } func TestReadDockerLabelsNoLabels(t *testing.T) { @@ -55,7 +55,7 @@ func TestReadDockerLabelsNoLabels(t *testing.T) { labels, err := labelReader.GetLabelsFromImage(image) require.NoError(t, err) - require.Equal(t, len(labels), 0) + require.Empty(t, labels) } func TestReadPodmanLabels(t *testing.T) { @@ -80,7 +80,7 @@ func TestReadPodmanLabels(t *testing.T) { labels, err := labelReader.GetLabelsFromImage(image) require.NoError(t, err) - require.Equal(t, labels[expectedLabelKey], expectedLabelVal) + require.Equal(t, expectedLabelVal, labels[expectedLabelKey]) } func TestReadPodmanLabelsNoLabels(t *testing.T) { @@ -103,7 +103,7 @@ func TestReadPodmanLabelsNoLabels(t *testing.T) { labels, err := labelReader.GetLabelsFromImage(image) require.NoError(t, err) - require.Equal(t, len(labels), 0) + require.Empty(t, labels) } func TestReadDockerLabels_PullError(t *testing.T) { diff --git a/pkg/image/registry_test.go b/pkg/image/registry_test.go index 378292a34..ac1df7ca4 100644 --- a/pkg/image/registry_test.go +++ b/pkg/image/registry_test.go @@ -2,12 +2,13 @@ package image_test import ( "context" + "crypto/rand" "crypto/x509" "errors" "fmt" "io" "math" - "math/rand" + "math/big" "net/http" "os" "sync" @@ -44,9 +45,11 @@ func 
poolForCertFile(t *testing.T, file string) *x509.CertPool { func TestRegistries(t *testing.T) { registries := map[string]newRegistryFunc{ "containerd": func(t *testing.T, cafile string) (image.Registry, cleanupFunc) { + val, err := rand.Int(rand.Reader, big.NewInt(math.MaxInt64)) + require.NoError(t, err) r, err := containerdregistry.NewRegistry( containerdregistry.WithLog(logrus.New().WithField("test", t.Name())), - containerdregistry.WithCacheDir(fmt.Sprintf("cache-%x", rand.Int())), + containerdregistry.WithCacheDir(fmt.Sprintf("cache-%x", val)), containerdregistry.WithRootCAs(poolForCertFile(t, cafile)), ) require.NoError(t, err) @@ -180,7 +183,10 @@ func testPullAndUnpack(t *testing.T, name string, newRegistry newRegistryFunc) { maxCount: tt.args.pullErrCount, err: tt.args.pullErr, }} - middlewareName := fmt.Sprintf("test-%x", rand.Int()) + val, err := rand.Int(rand.Reader, big.NewInt(math.MaxInt64)) + require.NoError(t, err) + + middlewareName := fmt.Sprintf("test-%x", val) require.NoError(t, repositorymiddleware.Register(middlewareName, mockRepo.init)) config.Middleware["repository"] = append(config.Middleware["repository"], configuration.Middleware{ Name: middlewareName, diff --git a/pkg/lib/bundle/exporter_test.go b/pkg/lib/bundle/exporter_test.go index 6dda2a5e2..b62b31457 100644 --- a/pkg/lib/bundle/exporter_test.go +++ b/pkg/lib/bundle/exporter_test.go @@ -3,7 +3,7 @@ package bundle import ( "testing" - "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" "github.com/operator-framework/operator-registry/pkg/containertools" ) @@ -11,12 +11,12 @@ import ( func TestExportForBundleWithBadImage(t *testing.T) { exporter := NewExporterForBundle("foo", "", containertools.DockerTool) err := exporter.Export(true, false) - assert.Error(t, err) + require.Error(t, err) err = exporter.Export(false, true) - assert.Error(t, err) + require.Error(t, err) exporter = NewExporterForBundle("foo", "", containertools.NoneTool) err = exporter.Export(true, false) - assert.Error(t, err) + require.Error(t, err) } diff --git a/pkg/lib/bundle/generate_test.go b/pkg/lib/bundle/generate_test.go index 6130fe1fd..45cf621f5 100644 --- a/pkg/lib/bundle/generate_test.go +++ b/pkg/lib/bundle/generate_test.go @@ -55,12 +55,12 @@ func TestValidateAnnotations(t *testing.T) { err error }{ { - buildTestAnnotations("annotations", + buildTestAnnotations( map[string]string{ "test1": "stable", "test2": "stable,beta", }), - buildTestAnnotations("annotations", + buildTestAnnotations( map[string]string{ "test1": "stable", "test2": "stable,beta", @@ -68,13 +68,13 @@ func TestValidateAnnotations(t *testing.T) { nil, }, { - buildTestAnnotations("annotations", + buildTestAnnotations( map[string]string{ "test1": "stable", "test2": "stable,beta", "test3": "beta", }), - buildTestAnnotations("annotations", + buildTestAnnotations( map[string]string{ "test1": "stable", "test2": "stable,beta", @@ -82,12 +82,12 @@ func TestValidateAnnotations(t *testing.T) { nil, }, { - buildTestAnnotations("annotations", + buildTestAnnotations( map[string]string{ "test1": "stable", "test2": "stable", }), - buildTestAnnotations("annotations", + buildTestAnnotations( map[string]string{ "test1": "stable", "test2": "stable,beta", @@ -95,12 +95,12 @@ func TestValidateAnnotations(t *testing.T) { fmt.Errorf(`Expect field "test2" to have value "stable,beta" instead of "stable"`), }, { - buildTestAnnotations("annotations", + buildTestAnnotations( map[string]string{ "test1": "stable", "test3": "stable", }), - 
buildTestAnnotations("annotations", + buildTestAnnotations( map[string]string{ "test1": "stable", "test2": "stable,beta", @@ -109,7 +109,7 @@ func TestValidateAnnotations(t *testing.T) { }, { []byte("\t"), - buildTestAnnotations("annotations", + buildTestAnnotations( map[string]string{ "test1": "stable", "test2": "stable,beta", @@ -117,7 +117,7 @@ func TestValidateAnnotations(t *testing.T) { fmt.Errorf("yaml: found character that cannot start any token"), }, { - buildTestAnnotations("annotations", + buildTestAnnotations( map[string]string{ "test1": "stable", "test2": "stable,beta", @@ -132,7 +132,7 @@ func TestValidateAnnotations(t *testing.T) { if item.err != nil { require.Equal(t, item.err.Error(), err.Error()) } else { - require.Nil(t, err) + require.NoError(t, err) } } } @@ -183,10 +183,10 @@ COPY x/y/z /metadata/ } for _, tt := range tests { - tt_expected := fmt.Sprintf(expected, tt.baseImage) + ttExpected := fmt.Sprintf(expected, tt.baseImage) actual, err := GenerateDockerfile("test1", "test2", "metadata/", filepath.Join("a", "b", "c"), filepath.Join("x", "y", "z"), "./", "test4", "test5", "", tt.baseImage) require.NoError(t, err) - require.Equal(t, tt_expected, string(actual)) + require.Equal(t, ttExpected, string(actual)) } } diff --git a/pkg/lib/bundle/utils_test.go b/pkg/lib/bundle/utils_test.go index 6757b6ac7..b1977068e 100644 --- a/pkg/lib/bundle/utils_test.go +++ b/pkg/lib/bundle/utils_test.go @@ -36,7 +36,7 @@ func cleanup() { } func createDir(dir string) { - os.MkdirAll(dir, os.ModePerm) + _ = os.MkdirAll(dir, os.ModePerm) } func createFiles(dir, input string) { @@ -56,20 +56,9 @@ func createFiles(dir, input string) { } } -func buildTestAnnotations(key string, items map[string]string) []byte { +func buildTestAnnotations(items map[string]string) []byte { temp := make(map[string]interface{}) - temp[key] = items + temp["annotations"] = items output, _ := yaml.Marshal(temp) return output } - -func clearDir(dir string) { - items, _ := os.ReadDir(dir) - - for _, item := range items { - if item.IsDir() { - continue - } - os.Remove(filepath.Join(dir, item.Name())) - } -} diff --git a/pkg/lib/bundle/validate_test.go b/pkg/lib/bundle/validate_test.go index aafe18a69..cb4a7283f 100644 --- a/pkg/lib/bundle/validate_test.go +++ b/pkg/lib/bundle/validate_test.go @@ -98,7 +98,7 @@ func TestValidateBundle_InvalidRegistryVersion(t *testing.T) { var validationError ValidationError isValidationErr := errors.As(err, &validationError) require.True(t, isValidationErr) - require.Equal(t, len(validationError.Errors), 1) + require.Len(t, validationError.Errors, 1) } func TestValidateBundleContent(t *testing.T) { diff --git a/pkg/lib/dns/nsswitch_test.go b/pkg/lib/dns/nsswitch_test.go index c49bc6b0e..02fd17819 100644 --- a/pkg/lib/dns/nsswitch_test.go +++ b/pkg/lib/dns/nsswitch_test.go @@ -49,7 +49,7 @@ func TestEnsureNsswitch(t *testing.T) { NsswitchFilename = "testfile" if tt.existingFile { - require.NoError(t, os.WriteFile(NsswitchFilename, []byte("test"), 0644)) + require.NoError(t, os.WriteFile(NsswitchFilename, []byte("test"), 0600)) } if err := EnsureNsswitch(); (err != nil) != tt.wantErr { diff --git a/pkg/lib/indexer/indexer_test.go b/pkg/lib/indexer/indexer_test.go index e4fd6fc2a..570b1eabd 100644 --- a/pkg/lib/indexer/indexer_test.go +++ b/pkg/lib/indexer/indexer_test.go @@ -33,7 +33,7 @@ func TestGetBundlesToExport(t *testing.T) { t.Fatalf("exporting bundles from db: %s", err) } - var bundleImages []string + bundleImages := make([]string, 0, len(bundleMap)) for bundlePath := range 
bundleMap { bundleImages = append(bundleImages, bundlePath) } @@ -41,7 +41,7 @@ func TestGetBundlesToExport(t *testing.T) { sort.Strings(bundleImages) if !reflect.DeepEqual(expected, bundleImages) { - t.Fatalf("exporting images: expected matching bundlepaths: expected %s got %s", expected, bundleImages) + t.Fatalf("exporting images: expected matching bundlepaths: expected %#v got %#v", expected, bundleImages) } } diff --git a/pkg/lib/registry/registry_test.go b/pkg/lib/registry/registry_test.go index fc7d514ea..c52296362 100644 --- a/pkg/lib/registry/registry_test.go +++ b/pkg/lib/registry/registry_test.go @@ -2,21 +2,22 @@ package registry import ( "context" + "crypto/rand" "database/sql" "encoding/json" "errors" "fmt" "io" - "math/rand" + "math" + "math/big" "os" "path/filepath" "testing" "testing/fstest" - "time" "github.com/blang/semver/v4" "github.com/stretchr/testify/require" - v1 "k8s.io/apimachinery/pkg/apis/meta/v1" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/apimachinery/pkg/apis/meta/v1/unstructured" "sigs.k8s.io/yaml" @@ -78,13 +79,13 @@ func newCache(t *testing.T, bundles []*model.Bundle) cache.Cache { } } if !pkgPropertyFound { - pkgJson, _ := json.Marshal(property.Package{ + pkgJSON, _ := json.Marshal(property.Package{ PackageName: b.Package.Name, Version: b.Version.String(), }) b.Properties = append(b.Properties, property.Property{ Type: property.TypePackage, - Value: pkgJson, + Value: pkgJSON, }) } } @@ -281,12 +282,10 @@ func TestUnpackImage(t *testing.T) { } } -func init() { - rand.Seed(time.Now().UTC().UnixNano()) -} - -func CreateTestDb(t *testing.T) (*sql.DB, func()) { - dbName := fmt.Sprintf("test-%d.db", rand.Int()) +func CreateTestDB(t *testing.T) (*sql.DB, func()) { + r, err := rand.Int(rand.Reader, big.NewInt(math.MaxInt64)) + require.NoError(t, err) + dbName := fmt.Sprintf("test-%d.db", r) db, err := sqlite.Open(dbName) require.NoError(t, err) @@ -326,10 +325,10 @@ func newUnpackedTestBundle(dir, name string, csvSpec json.RawMessage, annotation } rawCSV, err := json.Marshal(registry.ClusterServiceVersion{ - TypeMeta: v1.TypeMeta{ + TypeMeta: metav1.TypeMeta{ Kind: sqlite.ClusterServiceVersionKind, }, - ObjectMeta: v1.ObjectMeta{ + ObjectMeta: metav1.ObjectMeta{ Name: name, }, Spec: csvSpec, @@ -342,14 +341,17 @@ func newUnpackedTestBundle(dir, name string, csvSpec json.RawMessage, annotation if err := json.Unmarshal(rawCSV, &rawObj); err != nil { return bundleDir, cleanup, err } - rawObj.SetCreationTimestamp(v1.Time{}) + rawObj.SetCreationTimestamp(metav1.Time{}) jsonout, err := rawObj.MarshalJSON() + if err != nil { + return bundleDir, cleanup, err + } out, err := yaml.JSONToYAML(jsonout) if err != nil { return bundleDir, cleanup, err } - if err := os.WriteFile(filepath.Join(bundleDir, bundle.ManifestsDir, "csv.yaml"), out, 0666); err != nil { + if err := os.WriteFile(filepath.Join(bundleDir, bundle.ManifestsDir, "csv.yaml"), out, 0600); err != nil { return bundleDir, cleanup, err } @@ -357,7 +359,7 @@ func newUnpackedTestBundle(dir, name string, csvSpec json.RawMessage, annotation if err != nil { return bundleDir, cleanup, err } - if err := os.WriteFile(filepath.Join(bundleDir, bundle.MetadataDir, "annotations.yaml"), out, 0666); err != nil { + if err := os.WriteFile(filepath.Join(bundleDir, bundle.MetadataDir, "annotations.yaml"), out, 0600); err != nil { return bundleDir, cleanup, err } return bundleDir, cleanup, nil @@ -653,7 +655,7 @@ func TestCheckForBundles(t *testing.T) { for _, tt := range tests { t.Run(tt.description, func(t *testing.T) { 
tmpdir := t.TempDir() - db, cleanup := CreateTestDb(t) + db, cleanup := CreateTestDB(t) defer cleanup() load, err := sqlite.NewSQLLiteLoader(db) require.NoError(t, err) diff --git a/pkg/mirror/mirror_test.go b/pkg/mirror/mirror_test.go index 301380a14..e5de413c1 100644 --- a/pkg/mirror/mirror_test.go +++ b/pkg/mirror/mirror_test.go @@ -2,9 +2,11 @@ package mirror import ( "context" + "crypto/rand" "database/sql" "fmt" - "math/rand" + "math" + "math/big" "os" "testing" @@ -13,8 +15,10 @@ import ( "github.com/operator-framework/operator-registry/pkg/sqlite" ) -func CreateTestDb(t *testing.T) (*sql.DB, string, func()) { - dbName := fmt.Sprintf("test-%d.db", rand.Int()) +func CreateTestDB(t *testing.T) (*sql.DB, string, func()) { + r, err := rand.Int(rand.Reader, big.NewInt(math.MaxInt64)) + require.NoError(t, err) + dbName := fmt.Sprintf("test-%d.db", r.Int64()) db, err := sqlite.Open(dbName) require.NoError(t, err) @@ -39,10 +43,10 @@ func CreateTestDb(t *testing.T) (*sql.DB, string, func()) { } func TestIndexImageMirrorer_Mirror(t *testing.T) { - _, path, cleanup := CreateTestDb(t) + _, path, cleanup := CreateTestDB(t) defer cleanup() - var testExtractor DatabaseExtractorFunc = func(from string) (s string, e error) { + var testExtractor DatabaseExtractorFunc = func(from string) (string, error) { return path, nil } type fields struct { diff --git a/pkg/prettyunmarshaler/prettyunmarshaler_test.go b/pkg/prettyunmarshaler/prettyunmarshaler_test.go index a32287319..a904cd34f 100644 --- a/pkg/prettyunmarshaler/prettyunmarshaler_test.go +++ b/pkg/prettyunmarshaler/prettyunmarshaler_test.go @@ -20,6 +20,7 @@ func TestJsonUnmarshalError(t *testing.T) { } validData := []byte(`{"messages": ["Hello", "world!"]}`) invalidData := []byte(`{"messages": ["Hello", "world!"]`) + var invalidDataSource byte = 0 for _, tc := range []testCase{ { name: "unknown error", @@ -91,7 +92,7 @@ func TestJsonUnmarshalError(t *testing.T) { { name: "syntax error: no data", data: nil, - inErr: json.Unmarshal(invalidData, nil), + inErr: json.Unmarshal(invalidData, &invalidDataSource), expectErrorString: `unexpected end of JSON input`, expectPrettyString: `unexpected end of JSON input`, }, @@ -148,7 +149,13 @@ func TestJsonUnmarshalError(t *testing.T) { // // If the data does not cause a syntax error, this function will panic. 
func customOffsetSyntaxError(data []byte, offset int64) *json.SyntaxError { - err := json.Unmarshal(data, nil).(*json.SyntaxError) - err.Offset = offset - return err + var d *byte = nil + var se *json.SyntaxError + err := json.Unmarshal(data, d) + if errors.As(err, &se) { + se.Offset = offset + return se + } + + panic("error was not of type json.SyntaxError") } diff --git a/pkg/registry/bundle_test.go b/pkg/registry/bundle_test.go index 234487adf..a7fd8c8d4 100644 --- a/pkg/registry/bundle_test.go +++ b/pkg/registry/bundle_test.go @@ -66,6 +66,7 @@ func TestV1CRDsInBundle(t *testing.T) { {Group: "objectbucket.io", Version: "v1alpha1", Kind: "ObjectBucketClaim", Plural: "objectbucketclaims"}: {}, } providedAPIs, err := bundle.ProvidedAPIs() + require.NoError(t, err) t.Logf("provided CRDs: \n%#v", providedAPIs) if !reflect.DeepEqual(expectedAPIs, providedAPIs) { @@ -76,6 +77,7 @@ func TestV1CRDsInBundle(t *testing.T) { // bundle contains one v1beta1 and one v1 CRD dec := serializer.NewCodecFactory(Scheme).UniversalDeserializer() crds, err := bundle.CustomResourceDefinitions() + require.NoError(t, err) for _, crd := range crds { switch crd.(type) { case *apiextensionsv1.CustomResourceDefinition: diff --git a/pkg/registry/csv_test.go b/pkg/registry/csv_test.go index f649b6c0a..8f23f0478 100644 --- a/pkg/registry/csv_test.go +++ b/pkg/registry/csv_test.go @@ -7,13 +7,13 @@ import ( "github.com/stretchr/testify/assert" "github.com/stretchr/testify/require" - v1 "k8s.io/apimachinery/pkg/apis/meta/v1" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" ) func TestClusterServiceVersion_GetApiServiceDefinitions(t *testing.T) { type fields struct { - TypeMeta v1.TypeMeta - ObjectMeta v1.ObjectMeta + TypeMeta metav1.TypeMeta + ObjectMeta metav1.ObjectMeta Spec json.RawMessage } tests := []struct { @@ -26,8 +26,8 @@ func TestClusterServiceVersion_GetApiServiceDefinitions(t *testing.T) { { name: "v1alpha1 with owned, required", fields: fields{ - TypeMeta: v1.TypeMeta{}, - ObjectMeta: v1.ObjectMeta{}, + TypeMeta: metav1.TypeMeta{}, + ObjectMeta: metav1.ObjectMeta{}, Spec: json.RawMessage(` { "apiservicedefinitions": { @@ -60,8 +60,8 @@ func TestClusterServiceVersion_GetApiServiceDefinitions(t *testing.T) { { name: "v1alpha1 with owned", fields: fields{ - TypeMeta: v1.TypeMeta{}, - ObjectMeta: v1.ObjectMeta{}, + TypeMeta: metav1.TypeMeta{}, + ObjectMeta: metav1.ObjectMeta{}, Spec: json.RawMessage(` { "apiservicedefinitions": { @@ -83,8 +83,8 @@ func TestClusterServiceVersion_GetApiServiceDefinitions(t *testing.T) { { name: "v1alpha1 with required", fields: fields{ - TypeMeta: v1.TypeMeta{}, - ObjectMeta: v1.ObjectMeta{}, + TypeMeta: metav1.TypeMeta{}, + ObjectMeta: metav1.ObjectMeta{}, Spec: json.RawMessage(` { "apiservicedefinitions": { @@ -106,16 +106,16 @@ func TestClusterServiceVersion_GetApiServiceDefinitions(t *testing.T) { { name: "v1alpha1 missing owned,required", fields: fields{ - TypeMeta: v1.TypeMeta{}, - ObjectMeta: v1.ObjectMeta{}, + TypeMeta: metav1.TypeMeta{}, + ObjectMeta: metav1.ObjectMeta{}, Spec: json.RawMessage(`{"replaces": 5}`), }, }, { name: "v1alpha1 malformed owned,required", fields: fields{ - TypeMeta: v1.TypeMeta{}, - ObjectMeta: v1.ObjectMeta{}, + TypeMeta: metav1.TypeMeta{}, + ObjectMeta: metav1.ObjectMeta{}, Spec: json.RawMessage(` { "apiservicedefinitions": { @@ -151,8 +151,8 @@ func TestClusterServiceVersion_GetApiServiceDefinitions(t *testing.T) { func TestClusterServiceVersion_GetCustomResourceDefintions(t *testing.T) { type fields struct { - TypeMeta v1.TypeMeta - ObjectMeta 
v1.ObjectMeta + TypeMeta metav1.TypeMeta + ObjectMeta metav1.ObjectMeta Spec json.RawMessage } tests := []struct { @@ -165,8 +165,8 @@ func TestClusterServiceVersion_GetCustomResourceDefintions(t *testing.T) { { name: "v1alpha1 with owned, required", fields: fields{ - TypeMeta: v1.TypeMeta{}, - ObjectMeta: v1.ObjectMeta{}, + TypeMeta: metav1.TypeMeta{}, + ObjectMeta: metav1.ObjectMeta{}, Spec: json.RawMessage(` { "customresourcedefinitions": { @@ -199,8 +199,8 @@ func TestClusterServiceVersion_GetCustomResourceDefintions(t *testing.T) { { name: "v1alpha1 with owned", fields: fields{ - TypeMeta: v1.TypeMeta{}, - ObjectMeta: v1.ObjectMeta{}, + TypeMeta: metav1.TypeMeta{}, + ObjectMeta: metav1.ObjectMeta{}, Spec: json.RawMessage(` { "customresourcedefinitions": { @@ -222,8 +222,8 @@ func TestClusterServiceVersion_GetCustomResourceDefintions(t *testing.T) { { name: "v1alpha1 with required", fields: fields{ - TypeMeta: v1.TypeMeta{}, - ObjectMeta: v1.ObjectMeta{}, + TypeMeta: metav1.TypeMeta{}, + ObjectMeta: metav1.ObjectMeta{}, Spec: json.RawMessage(` { "customresourcedefinitions": { @@ -245,16 +245,16 @@ func TestClusterServiceVersion_GetCustomResourceDefintions(t *testing.T) { { name: "v1alpha1 missing owned,required", fields: fields{ - TypeMeta: v1.TypeMeta{}, - ObjectMeta: v1.ObjectMeta{}, + TypeMeta: metav1.TypeMeta{}, + ObjectMeta: metav1.ObjectMeta{}, Spec: json.RawMessage(`{"replaces": 5}`), }, }, { name: "v1alpha1 malformed owned,required", fields: fields{ - TypeMeta: v1.TypeMeta{}, - ObjectMeta: v1.ObjectMeta{}, + TypeMeta: metav1.TypeMeta{}, + ObjectMeta: metav1.ObjectMeta{}, Spec: json.RawMessage(` { "customresourcedefinitions": { @@ -291,8 +291,8 @@ func TestClusterServiceVersion_GetCustomResourceDefintions(t *testing.T) { func TestClusterServiceVersion_GetReplaces(t *testing.T) { type fields struct { - TypeMeta v1.TypeMeta - ObjectMeta v1.ObjectMeta + TypeMeta metav1.TypeMeta + ObjectMeta metav1.ObjectMeta Spec json.RawMessage } tests := []struct { @@ -304,8 +304,8 @@ func TestClusterServiceVersion_GetReplaces(t *testing.T) { { name: "v1alpha1 with replaces", fields: fields{ - TypeMeta: v1.TypeMeta{}, - ObjectMeta: v1.ObjectMeta{}, + TypeMeta: metav1.TypeMeta{}, + ObjectMeta: metav1.ObjectMeta{}, Spec: json.RawMessage(`{"replaces": "etcd-operator.v0.9.2"}`), }, want: "etcd-operator.v0.9.2", @@ -313,8 +313,8 @@ func TestClusterServiceVersion_GetReplaces(t *testing.T) { { name: "v1alpha1 no replaces", fields: fields{ - TypeMeta: v1.TypeMeta{}, - ObjectMeta: v1.ObjectMeta{}, + TypeMeta: metav1.TypeMeta{}, + ObjectMeta: metav1.ObjectMeta{}, Spec: json.RawMessage(`{"other": "field"}`), }, want: "", @@ -322,8 +322,8 @@ func TestClusterServiceVersion_GetReplaces(t *testing.T) { { name: "v1alpha1 malformed replaces", fields: fields{ - TypeMeta: v1.TypeMeta{}, - ObjectMeta: v1.ObjectMeta{}, + TypeMeta: metav1.TypeMeta{}, + ObjectMeta: metav1.ObjectMeta{}, Spec: json.RawMessage(`{"replaces": 5}`), }, wantErr: true, @@ -350,8 +350,8 @@ func TestClusterServiceVersion_GetReplaces(t *testing.T) { func TestClusterServiceVersion_GetSkips(t *testing.T) { type fields struct { - TypeMeta v1.TypeMeta - ObjectMeta v1.ObjectMeta + TypeMeta metav1.TypeMeta + ObjectMeta metav1.ObjectMeta Spec json.RawMessage } tests := []struct { @@ -363,8 +363,8 @@ func TestClusterServiceVersion_GetSkips(t *testing.T) { { name: "v1alpha1 with skips", fields: fields{ - TypeMeta: v1.TypeMeta{}, - ObjectMeta: v1.ObjectMeta{}, + TypeMeta: metav1.TypeMeta{}, + ObjectMeta: metav1.ObjectMeta{}, Spec: 
json.RawMessage(`{"skips": ["1.0.5", "1.0.4"]}`), }, want: []string{"1.0.5", "1.0.4"}, @@ -372,8 +372,8 @@ func TestClusterServiceVersion_GetSkips(t *testing.T) { { name: "v1alpha1 no skips", fields: fields{ - TypeMeta: v1.TypeMeta{}, - ObjectMeta: v1.ObjectMeta{}, + TypeMeta: metav1.TypeMeta{}, + ObjectMeta: metav1.ObjectMeta{}, Spec: json.RawMessage(`{"other": "field"}`), }, want: nil, @@ -381,8 +381,8 @@ func TestClusterServiceVersion_GetSkips(t *testing.T) { { name: "v1alpha1 malformed skips", fields: fields{ - TypeMeta: v1.TypeMeta{}, - ObjectMeta: v1.ObjectMeta{}, + TypeMeta: metav1.TypeMeta{}, + ObjectMeta: metav1.ObjectMeta{}, Spec: json.RawMessage(`{"skips": 5}`), }, wantErr: true, @@ -408,8 +408,8 @@ func TestClusterServiceVersion_GetSkips(t *testing.T) { func TestClusterServiceVersion_GetVersion(t *testing.T) { type fields struct { - TypeMeta v1.TypeMeta - ObjectMeta v1.ObjectMeta + TypeMeta metav1.TypeMeta + ObjectMeta metav1.ObjectMeta Spec json.RawMessage } tests := []struct { @@ -421,8 +421,8 @@ func TestClusterServiceVersion_GetVersion(t *testing.T) { { name: "v1alpha1 with version", fields: fields{ - TypeMeta: v1.TypeMeta{}, - ObjectMeta: v1.ObjectMeta{}, + TypeMeta: metav1.TypeMeta{}, + ObjectMeta: metav1.ObjectMeta{}, Spec: json.RawMessage(`{"version": "1.0.5"}`), }, want: "1.0.5", @@ -430,8 +430,8 @@ func TestClusterServiceVersion_GetVersion(t *testing.T) { { name: "v1alpha1 no version", fields: fields{ - TypeMeta: v1.TypeMeta{}, - ObjectMeta: v1.ObjectMeta{}, + TypeMeta: metav1.TypeMeta{}, + ObjectMeta: metav1.ObjectMeta{}, Spec: json.RawMessage(`{"other": "field"}`), }, want: "", @@ -439,8 +439,8 @@ func TestClusterServiceVersion_GetVersion(t *testing.T) { { name: "v1alpha1 malformed version", fields: fields{ - TypeMeta: v1.TypeMeta{}, - ObjectMeta: v1.ObjectMeta{}, + TypeMeta: metav1.TypeMeta{}, + ObjectMeta: metav1.ObjectMeta{}, Spec: json.RawMessage(`{"version": 5}`), }, wantErr: true, @@ -467,8 +467,8 @@ func TestClusterServiceVersion_GetVersion(t *testing.T) { func TestClusterServiceVersion_GetRelatedImages(t *testing.T) { type fields struct { - TypeMeta v1.TypeMeta - ObjectMeta v1.ObjectMeta + TypeMeta metav1.TypeMeta + ObjectMeta metav1.ObjectMeta Spec json.RawMessage } tests := []struct { @@ -480,8 +480,8 @@ func TestClusterServiceVersion_GetRelatedImages(t *testing.T) { { name: "no related images", fields: fields{ - TypeMeta: v1.TypeMeta{}, - ObjectMeta: v1.ObjectMeta{}, + TypeMeta: metav1.TypeMeta{}, + ObjectMeta: metav1.ObjectMeta{}, Spec: json.RawMessage(`{"no": "field"}`), }, want: map[string]struct{}{}, @@ -489,8 +489,8 @@ func TestClusterServiceVersion_GetRelatedImages(t *testing.T) { { name: "one related image", fields: fields{ - TypeMeta: v1.TypeMeta{}, - ObjectMeta: v1.ObjectMeta{}, + TypeMeta: metav1.TypeMeta{}, + ObjectMeta: metav1.ObjectMeta{}, Spec: json.RawMessage(`{"relatedImages": [ {"name": "test", "image": "quay.io/etcd/etcd-operator@sha256:123"} ]}`), @@ -500,8 +500,8 @@ func TestClusterServiceVersion_GetRelatedImages(t *testing.T) { { name: "multiple related images", fields: fields{ - TypeMeta: v1.TypeMeta{}, - ObjectMeta: v1.ObjectMeta{}, + TypeMeta: metav1.TypeMeta{}, + ObjectMeta: metav1.ObjectMeta{}, Spec: json.RawMessage(`{"relatedImages": [ {"name": "test", "image": "quay.io/etcd/etcd-operator@sha256:123"}, {"name": "operand", "image": "quay.io/etcd/etcd@sha256:123"} @@ -529,8 +529,8 @@ func TestClusterServiceVersion_GetRelatedImages(t *testing.T) { func TestClusterServiceVersion_GetOperatorImages(t *testing.T) { type fields struct { 
- TypeMeta v1.TypeMeta - ObjectMeta v1.ObjectMeta + TypeMeta metav1.TypeMeta + ObjectMeta metav1.ObjectMeta Spec json.RawMessage } tests := []struct { @@ -542,8 +542,8 @@ func TestClusterServiceVersion_GetOperatorImages(t *testing.T) { { name: "bad strategy", fields: fields{ - TypeMeta: v1.TypeMeta{}, - ObjectMeta: v1.ObjectMeta{}, + TypeMeta: metav1.TypeMeta{}, + ObjectMeta: metav1.ObjectMeta{}, Spec: json.RawMessage(` {"install": {"strategy": "nope", "spec": {"deployments":[{"name":"etcd-operator","spec":{"template":{"spec":{"containers":[{ "command":["etcd-operator"], @@ -555,8 +555,8 @@ func TestClusterServiceVersion_GetOperatorImages(t *testing.T) { { name: "no images", fields: fields{ - TypeMeta: v1.TypeMeta{}, - ObjectMeta: v1.ObjectMeta{}, + TypeMeta: metav1.TypeMeta{}, + ObjectMeta: metav1.ObjectMeta{}, Spec: json.RawMessage(` {"install": {"strategy": "deployment","spec": {"deployments":[{"name":"etcd-operator","spec":{"template":{"spec": "containers":[] @@ -568,8 +568,8 @@ func TestClusterServiceVersion_GetOperatorImages(t *testing.T) { { name: "one image", fields: fields{ - TypeMeta: v1.TypeMeta{}, - ObjectMeta: v1.ObjectMeta{}, + TypeMeta: metav1.TypeMeta{}, + ObjectMeta: metav1.ObjectMeta{}, Spec: json.RawMessage(` {"install": {"strategy": "deployment", "spec": {"deployments":[{ "name":"etcd-operator", @@ -592,8 +592,8 @@ func TestClusterServiceVersion_GetOperatorImages(t *testing.T) { { name: "two container images", fields: fields{ - TypeMeta: v1.TypeMeta{}, - ObjectMeta: v1.ObjectMeta{}, + TypeMeta: metav1.TypeMeta{}, + ObjectMeta: metav1.ObjectMeta{}, Spec: json.RawMessage(` {"install": {"strategy": "deployment", "spec": {"deployments":[{ "name":"etcd-operator", @@ -621,8 +621,8 @@ func TestClusterServiceVersion_GetOperatorImages(t *testing.T) { { name: "init container image", fields: fields{ - TypeMeta: v1.TypeMeta{}, - ObjectMeta: v1.ObjectMeta{}, + TypeMeta: metav1.TypeMeta{}, + ObjectMeta: metav1.ObjectMeta{}, Spec: json.RawMessage(` { "install": { @@ -655,8 +655,8 @@ func TestClusterServiceVersion_GetOperatorImages(t *testing.T) { { name: "two init container images", fields: fields{ - TypeMeta: v1.TypeMeta{}, - ObjectMeta: v1.ObjectMeta{}, + TypeMeta: metav1.TypeMeta{}, + ObjectMeta: metav1.ObjectMeta{}, Spec: json.RawMessage(` { "install": { @@ -694,8 +694,8 @@ func TestClusterServiceVersion_GetOperatorImages(t *testing.T) { { name: "container and init container", fields: fields{ - TypeMeta: v1.TypeMeta{}, - ObjectMeta: v1.ObjectMeta{}, + TypeMeta: metav1.TypeMeta{}, + ObjectMeta: metav1.ObjectMeta{}, Spec: json.RawMessage(` { "install": { @@ -820,17 +820,17 @@ func TestLoadingCsvFromBundleDirectory(t *testing.T) { assert.EqualValues(t, tt.name, csv.GetName()) csvVersion, err := csv.GetVersion() - assert.NoError(t, err) + require.NoError(t, err) assert.EqualValues(t, tt.version, csvVersion) assert.EqualValues(t, tt.skipRange, csv.GetSkipRange()) csvReplace, err := csv.GetReplaces() - assert.NoError(t, err) + require.NoError(t, err) assert.EqualValues(t, tt.replace, csvReplace) csvSkips, err := csv.GetSkips() - assert.NoError(t, err) + require.NoError(t, err) assert.EqualValues(t, tt.skips, csvSkips) }) } diff --git a/pkg/registry/decode_test.go b/pkg/registry/decode_test.go index 02b26de5a..7dc7c6698 100644 --- a/pkg/registry/decode_test.go +++ b/pkg/registry/decode_test.go @@ -23,7 +23,7 @@ func TestDecodeUnstructured(t *testing.T) { name: "ValidObjectWithKind", file: "testdata/valid-unstructured.yaml", assertFunc: func(t *testing.T, objGot 
*unstructured.Unstructured, errGot error) { - assert.NoError(t, errGot) + require.NoError(t, errGot) assert.NotNil(t, objGot) assert.Equal(t, "FooKind", objGot.GetKind()) @@ -34,7 +34,7 @@ func TestDecodeUnstructured(t *testing.T) { name: "InvalidObjectWithoutKind", file: "testdata/invalid-unstructured.yaml", assertFunc: func(t *testing.T, objGot *unstructured.Unstructured, errGot error) { - assert.Error(t, errGot) + require.Error(t, errGot) assert.Nil(t, objGot) }, }, @@ -63,7 +63,7 @@ func TestDecodePackageManifest(t *testing.T) { name: "WithValidObject", file: "testdata/valid-package-manifest.yaml", assertFunc: func(t *testing.T, packageManifestGot *PackageManifest, errGot error) { - assert.NoError(t, errGot) + require.NoError(t, errGot) assert.NotNil(t, packageManifestGot) assert.Equal(t, "foo", packageManifestGot.PackageName) @@ -74,7 +74,7 @@ func TestDecodePackageManifest(t *testing.T) { name: "WithoutPackageName", file: "testdata/invalid-package-manifest.yaml", assertFunc: func(t *testing.T, packageManifestGot *PackageManifest, errGot error) { - assert.Error(t, errGot) + require.Error(t, errGot) assert.Nil(t, packageManifestGot) }, }, @@ -109,21 +109,21 @@ func TestDecodeFileFS(t *testing.T) { var nilPtr *foo require.NoError(t, decodeFileFS(root, "foo.yaml", nilPtr, entry)) require.Nil(t, nilPtr) - require.Equal(t, 0, len(logHook.Entries)) + require.Empty(t, logHook.Entries) logHook.Reset() ptr := &foo{} require.NoError(t, decodeFileFS(root, "foo.yaml", ptr, entry)) require.NotNil(t, ptr) require.Equal(t, "baz", ptr.Bar) - require.Equal(t, 0, len(logHook.Entries)) + require.Empty(t, logHook.Entries) logHook.Reset() ptr = &foo{} require.NoError(t, decodeFileFS(root, "multi.yaml", ptr, entry)) require.NotNil(t, ptr) require.Equal(t, "baz", ptr.Bar) - require.Equal(t, 1, len(logHook.Entries)) + require.Len(t, logHook.Entries, 1) require.Equal(t, logrus.WarnLevel, logHook.LastEntry().Level) require.Equal(t, "found more than one document inside multi.yaml, using only the first one", logHook.LastEntry().Message) logHook.Reset() diff --git a/pkg/registry/helper_test.go b/pkg/registry/helper_test.go index 9121fa450..f0a2a8b18 100644 --- a/pkg/registry/helper_test.go +++ b/pkg/registry/helper_test.go @@ -18,13 +18,13 @@ func TestBundleVersionCompare(t *testing.T) { type order func(t *testing.T, val int) var ( lt order = func(t *testing.T, val int) { - require.Less(t, val, 0) + require.Negative(t, val) } gt order = func(t *testing.T, val int) { - require.Greater(t, val, 0) + require.Positive(t, val) } eq order = func(t *testing.T, val int) { - require.Equal(t, val, 0) + require.Equal(t, 0, val) } ) type expect struct { diff --git a/pkg/registry/parse_test.go b/pkg/registry/parse_test.go index d661a27cd..a912a9e41 100644 --- a/pkg/registry/parse_test.go +++ b/pkg/registry/parse_test.go @@ -563,12 +563,12 @@ func TestDerivedProperties(t *testing.T) { properties, err := parser.derivedProperties(in) if tt.expected.err { - assert.Error(t, err) + require.Error(t, err) assert.Nil(t, properties) return } - assert.NoError(t, err) + require.NoError(t, err) assert.ElementsMatch(t, tt.expected.properties, properties) }) } diff --git a/pkg/registry/populator_test.go b/pkg/registry/populator_test.go index c0b310c71..561e718fb 100644 --- a/pkg/registry/populator_test.go +++ b/pkg/registry/populator_test.go @@ -2,23 +2,24 @@ package registry_test import ( "context" + "crypto/rand" "database/sql" "encoding/json" + "errors" "fmt" - "math/rand" + "math" + "math/big" "os" "path/filepath" "reflect" "strings" 
"testing" - "time" "github.com/blang/semver/v4" "github.com/sirupsen/logrus" "github.com/stretchr/testify/require" - v1 "k8s.io/apimachinery/pkg/apis/meta/v1" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/apimachinery/pkg/apis/meta/v1/unstructured" - "k8s.io/apimachinery/pkg/util/errors" utilerrors "k8s.io/apimachinery/pkg/util/errors" "sigs.k8s.io/yaml" @@ -32,12 +33,10 @@ import ( "github.com/operator-framework/operator-registry/pkg/sqlite" ) -func init() { - rand.Seed(time.Now().UTC().UnixNano()) -} - -func CreateTestDb(t *testing.T) (*sql.DB, func()) { - dbName := fmt.Sprintf("test-%d.db", rand.Int()) +func CreateTestDB(t *testing.T) (*sql.DB, func()) { + r, err := rand.Int(rand.Reader, big.NewInt(math.MaxInt64)) + require.NoError(t, err) + dbName := fmt.Sprintf("test-%d.db", r.Int64()) db, err := sqlite.Open(dbName) require.NoError(t, err) @@ -92,7 +91,7 @@ func createAndPopulateDB(db *sql.DB) (*sqlite.SQLQuerier, error) { func TestImageLoader(t *testing.T) { logrus.SetLevel(logrus.DebugLevel) - db, cleanup := CreateTestDb(t) + db, cleanup := CreateTestDB(t) defer cleanup() _, err := createAndPopulateDB(db) @@ -101,7 +100,7 @@ func TestImageLoader(t *testing.T) { func TestQuerierForImage(t *testing.T) { logrus.SetLevel(logrus.DebugLevel) - db, cleanup := CreateTestDb(t) + db, cleanup := CreateTestDB(t) defer cleanup() store, err := createAndPopulateDB(db) @@ -203,6 +202,7 @@ func TestQuerierForImage(t *testing.T) { EqualBundles(t, *expectedBundle, *etcdBundleByReplaces) etcdChannelEntriesThatProvide, err := store.GetChannelEntriesThatProvide(context.TODO(), "etcd.database.coreos.com", "v1beta2", "EtcdCluster") + require.NoError(t, err) require.ElementsMatch(t, []*registry.ChannelEntry{ {"etcd", "alpha", "etcdoperator.v0.9.0", ""}, {"etcd", "alpha", "etcdoperator.v0.9.2", "etcdoperator.v0.9.1"}, @@ -496,7 +496,7 @@ func TestImageLoading(t *testing.T) { for _, tt := range tests { t.Run(tt.name, func(t *testing.T) { logrus.SetLevel(logrus.DebugLevel) - db, cleanup := CreateTestDb(t) + db, cleanup := CreateTestDB(t) defer cleanup() load, err := sqlite.NewSQLLiteLoader(db) require.NoError(t, err) @@ -540,7 +540,8 @@ func TestImageLoading(t *testing.T) { } func checkAggErr(aggErr, wantErr error) bool { - if a, ok := aggErr.(utilerrors.Aggregate); ok { + var a utilerrors.Aggregate + if errors.As(aggErr, &a) { for _, e := range a.Errors() { if reflect.TypeOf(e).String() == reflect.TypeOf(wantErr).String() { return true @@ -553,7 +554,7 @@ func checkAggErr(aggErr, wantErr error) bool { func TestQuerierForDependencies(t *testing.T) { logrus.SetLevel(logrus.DebugLevel) - db, cleanup := CreateTestDb(t) + db, cleanup := CreateTestDB(t) defer cleanup() store, err := createAndPopulateDB(db) @@ -612,7 +613,7 @@ func TestQuerierForDependencies(t *testing.T) { func TestListBundles(t *testing.T) { logrus.SetLevel(logrus.DebugLevel) - db, cleanup := CreateTestDb(t) + db, cleanup := CreateTestDB(t) defer cleanup() store, err := createAndPopulateDB(db) @@ -663,7 +664,7 @@ func TestListBundles(t *testing.T) { } } } - require.Equal(t, 10, len(bundles)) + require.Len(t, bundles, 10) require.ElementsMatch(t, expectedDependencies, dependencies) } @@ -721,7 +722,7 @@ func CheckBundlesHaveContentsIfNoPath(t *testing.T, db *sql.DB) { } func TestDirectoryPopulator(t *testing.T) { - db, cleanup := CreateTestDb(t) + db, cleanup := CreateTestDB(t) defer cleanup() loader, err := sqlite.NewSQLLiteLoader(db) @@ -747,7 +748,7 @@ func TestDirectoryPopulator(t *testing.T) { } err = populate(add) - 
require.NotNil(t, err) + require.Error(t, err) require.Contains(t, err.Error(), fmt.Sprintf("Invalid bundle %s, replaces nonexistent bundle %s", "etcdoperator.v0.9.2", "etcdoperator.v0.9.0")) require.Contains(t, err.Error(), fmt.Sprintf("Invalid bundle %s, replaces nonexistent bundle %s", "prometheusoperator.0.22.2", "prometheusoperator.0.15.0")) } @@ -844,7 +845,7 @@ func TestDeprecateBundle(t *testing.T) { }, }, expected: expected{ - err: errors.NewAggregate([]error{fmt.Errorf("error deprecating bundle quay.io/test/prometheus.0.22.2: %s", registry.ErrRemovingDefaultChannelDuringDeprecation)}), + err: utilerrors.NewAggregate([]error{fmt.Errorf("error deprecating bundle quay.io/test/prometheus.0.22.2: %s", registry.ErrRemovingDefaultChannelDuringDeprecation)}), remainingBundles: []string{ "quay.io/test/etcd.0.9.0/alpha", "quay.io/test/etcd.0.9.0/beta", @@ -910,7 +911,7 @@ func TestDeprecateBundle(t *testing.T) { for _, tt := range tests { t.Run(tt.description, func(t *testing.T) { logrus.SetLevel(logrus.DebugLevel) - db, cleanup := CreateTestDb(t) + db, cleanup := CreateTestDB(t) defer cleanup() querier, err := createAndPopulateDB(db) @@ -1122,7 +1123,7 @@ func TestDeprecatePackage(t *testing.T) { for _, tt := range tests { t.Run(tt.description, func(t *testing.T) { logrus.SetLevel(logrus.DebugLevel) - db, cleanup := CreateTestDb(t) + db, cleanup := CreateTestDB(t) defer cleanup() querier, err := createAndPopulateDB(db) @@ -1630,7 +1631,7 @@ func TestAddAfterDeprecate(t *testing.T) { for _, tt := range tests { t.Run(tt.description, func(t *testing.T) { logrus.SetLevel(logrus.DebugLevel) - db, cleanup := CreateTestDb(t) + db, cleanup := CreateTestDB(t) defer cleanup() load, err := sqlite.NewSQLLiteLoader(db, sqlite.WithEnableAlpha(true)) @@ -2050,7 +2051,7 @@ func TestOverwrite(t *testing.T) { for _, tt := range tests { t.Run(tt.description, func(t *testing.T) { logrus.SetLevel(logrus.DebugLevel) - db, cleanup := CreateTestDb(t) + db, cleanup := CreateTestDB(t) defer cleanup() store, err := sqlite.NewSQLLiteLoader(db) @@ -2876,7 +2877,7 @@ func TestSubstitutesFor(t *testing.T) { for _, tt := range tests { t.Run(tt.description, func(t *testing.T) { logrus.SetLevel(logrus.DebugLevel) - db, cleanup := CreateTestDb(t) + db, cleanup := CreateTestDB(t) defer cleanup() load, err := sqlite.NewSQLLiteLoader(db, sqlite.WithEnableAlpha(true)) @@ -2919,7 +2920,7 @@ func TestSubstitutesFor(t *testing.T) { if bundleThatReplaces != nil { require.Equal(t, tt.expected.whatReplaces[bundle.CsvName][bundle.ChannelName], bundleThatReplaces.CsvName) } else { - require.Equal(t, tt.expected.whatReplaces[bundle.CsvName][bundle.ChannelName], "") + require.Equal(t, "", tt.expected.whatReplaces[bundle.CsvName][bundle.ChannelName]) } substitution, err := getBundleSubstitution(context.Background(), db, bundle.CsvName) require.NoError(t, err) @@ -2992,7 +2993,7 @@ func TestEnableAlpha(t *testing.T) { enableAlpha: false, }, expected: expected{ - err: errors.NewAggregate([]error{fmt.Errorf("SubstitutesFor is an alpha-only feature. You must enable alpha features with the flag --enable-alpha in order to use this feature.")}), + err: utilerrors.NewAggregate([]error{fmt.Errorf("SubstitutesFor is an alpha-only feature. 
You must enable alpha features with the flag --enable-alpha in order to use this feature.")}), }, }, } @@ -3000,7 +3001,7 @@ func TestEnableAlpha(t *testing.T) { for _, tt := range tests { t.Run(tt.description, func(t *testing.T) { logrus.SetLevel(logrus.DebugLevel) - db, cleanup := CreateTestDb(t) + db, cleanup := CreateTestDB(t) defer cleanup() load, err := sqlite.NewSQLLiteLoader(db, sqlite.WithEnableAlpha(tt.args.enableAlpha)) @@ -3048,10 +3049,10 @@ func newUnpackedTestBundle(root, dir, name string, csvSpec json.RawMessage, anno } rawCSV, err := json.Marshal(registry.ClusterServiceVersion{ - TypeMeta: v1.TypeMeta{ + TypeMeta: metav1.TypeMeta{ Kind: sqlite.ClusterServiceVersionKind, }, - ObjectMeta: v1.ObjectMeta{ + ObjectMeta: metav1.ObjectMeta{ Name: name, }, Spec: csvSpec, @@ -3064,14 +3065,17 @@ func newUnpackedTestBundle(root, dir, name string, csvSpec json.RawMessage, anno if err := json.Unmarshal(rawCSV, &rawObj); err != nil { return bundleDir, cleanup, err } - rawObj.SetCreationTimestamp(v1.Time{}) + rawObj.SetCreationTimestamp(metav1.Time{}) jsonout, err := rawObj.MarshalJSON() + if err != nil { + return bundleDir, cleanup, err + } out, err := yaml.JSONToYAML(jsonout) if err != nil { return bundleDir, cleanup, err } - if err := os.WriteFile(filepath.Join(bundleDir, bundle.ManifestsDir, "csv.yaml"), out, 0666); err != nil { + if err := os.WriteFile(filepath.Join(bundleDir, bundle.ManifestsDir, "csv.yaml"), out, 0600); err != nil { return bundleDir, cleanup, err } @@ -3079,7 +3083,7 @@ func newUnpackedTestBundle(root, dir, name string, csvSpec json.RawMessage, anno if err != nil { return bundleDir, cleanup, err } - if err := os.WriteFile(filepath.Join(bundleDir, bundle.MetadataDir, "annotations.yaml"), out, 0666); err != nil { + if err := os.WriteFile(filepath.Join(bundleDir, bundle.MetadataDir, "annotations.yaml"), out, 0600); err != nil { return bundleDir, cleanup, err } return bundleDir, cleanup, nil @@ -3093,24 +3097,25 @@ func TestValidateEdgeBundlePackage(t *testing.T) { spec := v1alpha1.ClusterServiceVersionSpec{ Replaces: replaces, Skips: skips, - Version: version.OperatorVersion{v}, + Version: version.OperatorVersion{Version: v}, } - specJson, err := json.Marshal(&spec) + specJSON, err := json.Marshal(&spec) require.NoError(t, err) rawCSV, err := json.Marshal(registry.ClusterServiceVersion{ - TypeMeta: v1.TypeMeta{ + TypeMeta: metav1.TypeMeta{ Kind: sqlite.ClusterServiceVersionKind, }, - ObjectMeta: v1.ObjectMeta{ + ObjectMeta: metav1.ObjectMeta{ Name: name, }, - Spec: specJson, + Spec: specJSON, }) + require.NoError(t, err) rawObj := unstructured.Unstructured{} require.NoError(t, json.Unmarshal(rawCSV, &rawObj)) - rawObj.SetCreationTimestamp(v1.Time{}) + rawObj.SetCreationTimestamp(metav1.Time{}) jsonout, err := rawObj.MarshalJSON() require.NoError(t, err) @@ -3121,7 +3126,7 @@ func TestValidateEdgeBundlePackage(t *testing.T) { } logrus.SetLevel(logrus.DebugLevel) - db, cleanup := CreateTestDb(t) + db, cleanup := CreateTestDB(t) defer cleanup() store, err := createAndPopulateDB(db) diff --git a/pkg/registry/types_test.go b/pkg/registry/types_test.go index c74275dde..943f72423 100644 --- a/pkg/registry/types_test.go +++ b/pkg/registry/types_test.go @@ -5,7 +5,7 @@ import ( "fmt" "testing" - "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" ) func TestCelConstraintValidation(t *testing.T) { @@ -52,13 +52,13 @@ func TestCelConstraintValidation(t *testing.T) { t.Run(tt.name, func(t *testing.T) { var dep CelConstraint err := 
json.Unmarshal([]byte(tt.constraint), &dep) - assert.NoError(t, err) + require.NoError(t, err) errs := dep.Validate() if len(tt.errs) > 0 { - assert.Error(t, errs[0]) - assert.Contains(t, errs[0].Error(), tt.errs[0].Error()) + require.Error(t, errs[0]) + require.Contains(t, errs[0].Error(), tt.errs[0].Error()) } else { - assert.Equal(t, len(errs), 0) + require.Empty(t, errs) } }) } diff --git a/pkg/server/server_test.go b/pkg/server/server_test.go index 1fd4d4745..51d6b1008 100644 --- a/pkg/server/server_test.go +++ b/pkg/server/server_test.go @@ -1,6 +1,7 @@ package server import ( + "errors" "fmt" "io" "io/fs" @@ -146,7 +147,7 @@ func TestMain(m *testing.M) { var wg sync.WaitGroup wg.Add(3) go func() { - lis, err := net.Listen("tcp", dbPort) + lis, err := net.Listen("tcp", fmt.Sprintf("localhost%s", dbPort)) if err != nil { logrus.Fatalf("failed to listen: %v", err) } @@ -156,7 +157,7 @@ func TestMain(m *testing.M) { } }() go func() { - lis, err := net.Listen("tcp", cachePort) + lis, err := net.Listen("tcp", fmt.Sprintf("localhost%s", cachePort)) if err != nil { logrus.Fatalf("failed to listen: %v", err) } @@ -216,7 +217,7 @@ func testListPackages(addr string, expected []string) func(*testing.T) { go func(t *testing.T) { for { in, err := stream.Recv() - if err == io.EOF { + if errors.Is(err, io.EOF) { // read done. close(waitc) return @@ -326,8 +327,8 @@ func TestGetBundle(t *testing.T) { }, } ) - t.Run("Sqlite", testGetBundle(dbAddress, etcdoperator_v0_9_2("alpha", false, false, includeManifestsAll))) - t.Run("FBCCache", testGetBundle(cacheAddress, etcdoperator_v0_9_2("alpha", false, true, includeManifestsAll))) + t.Run("Sqlite", testGetBundle(dbAddress, etcdoperatorV0_9_2("alpha", false, false, includeManifestsAll))) + t.Run("FBCCache", testGetBundle(cacheAddress, etcdoperatorV0_9_2("alpha", false, true, includeManifestsAll))) t.Run("FBCCacheWithDeprecations", testGetBundle(deprecationCacheAddress, cockroachBundle)) } @@ -345,13 +346,13 @@ func testGetBundle(addr string, expected *api.Bundle) func(*testing.T) { func TestGetBundleForChannel(t *testing.T) { { - b := etcdoperator_v0_9_2("alpha", false, false, includeManifestsAll) + b := etcdoperatorV0_9_2("alpha", false, false, includeManifestsAll) t.Run("Sqlite", testGetBundleForChannel(dbAddress, &api.Bundle{ CsvName: b.CsvName, CsvJson: b.CsvJson + "\n", })) } - t.Run("FBCCache", testGetBundleForChannel(cacheAddress, etcdoperator_v0_9_2("alpha", false, true, includeManifestsAll))) + t.Run("FBCCache", testGetBundleForChannel(cacheAddress, etcdoperatorV0_9_2("alpha", false, true, includeManifestsAll))) } func testGetBundleForChannel(addr string, expected *api.Bundle) func(*testing.T) { @@ -416,7 +417,7 @@ func testGetChannelEntriesThatReplace(addr string, expected []*api.ChannelEntry) go func(t *testing.T) { for { in, err := stream.Recv() - if err == io.EOF { + if errors.Is(err, io.EOF) { // read done. 
close(waitc) return @@ -455,8 +456,8 @@ func testGetChannelEntriesThatReplace(addr string, expected []*api.ChannelEntry) } func TestGetBundleThatReplaces(t *testing.T) { - t.Run("Sqlite", testGetBundleThatReplaces(dbAddress, etcdoperator_v0_9_2("alpha", false, false, includeManifestsAll))) - t.Run("FBCCache", testGetBundleThatReplaces(cacheAddress, etcdoperator_v0_9_2("alpha", false, true, includeManifestsAll))) + t.Run("Sqlite", testGetBundleThatReplaces(dbAddress, etcdoperatorV0_9_2("alpha", false, false, includeManifestsAll))) + t.Run("FBCCache", testGetBundleThatReplaces(cacheAddress, etcdoperatorV0_9_2("alpha", false, true, includeManifestsAll))) } func testGetBundleThatReplaces(addr string, expected *api.Bundle) func(*testing.T) { @@ -471,8 +472,8 @@ func testGetBundleThatReplaces(addr string, expected *api.Bundle) func(*testing. } func TestGetBundleThatReplacesSynthetic(t *testing.T) { - t.Run("Sqlite", testGetBundleThatReplacesSynthetic(dbAddress, etcdoperator_v0_9_2("alpha", false, false, includeManifestsAll))) - t.Run("FBCCache", testGetBundleThatReplacesSynthetic(cacheAddress, etcdoperator_v0_9_2("alpha", false, true, includeManifestsAll))) + t.Run("Sqlite", testGetBundleThatReplacesSynthetic(dbAddress, etcdoperatorV0_9_2("alpha", false, false, includeManifestsAll))) + t.Run("FBCCache", testGetBundleThatReplacesSynthetic(cacheAddress, etcdoperatorV0_9_2("alpha", false, true, includeManifestsAll))) } func testGetBundleThatReplacesSynthetic(addr string, expected *api.Bundle) func(*testing.T) { @@ -505,7 +506,7 @@ func testGetChannelEntriesThatProvide(addr string) func(t *testing.T) { go func(t *testing.T) { for { in, err := stream.Recv() - if err == io.EOF { + if errors.Is(err, io.EOF) { // read done. close(waitc) return @@ -622,7 +623,7 @@ func testGetLatestChannelEntriesThatProvide(addr string) func(t *testing.T) { go func(t *testing.T) { for { in, err := stream.Recv() - if err == io.EOF { + if errors.Is(err, io.EOF) { // read done. 
close(waitc) return @@ -681,8 +682,8 @@ func testGetLatestChannelEntriesThatProvide(addr string) func(t *testing.T) { } func TestGetDefaultBundleThatProvides(t *testing.T) { - t.Run("Sqlite", testGetDefaultBundleThatProvides(dbAddress, etcdoperator_v0_9_2("alpha", false, false, includeManifestsAll))) - t.Run("FBCCache", testGetDefaultBundleThatProvides(cacheAddress, etcdoperator_v0_9_2("alpha", false, true, includeManifestsAll))) + t.Run("Sqlite", testGetDefaultBundleThatProvides(dbAddress, etcdoperatorV0_9_2("alpha", false, false, includeManifestsAll))) + t.Run("FBCCache", testGetDefaultBundleThatProvides(cacheAddress, etcdoperatorV0_9_2("alpha", false, true, includeManifestsAll))) } func testGetDefaultBundleThatProvides(addr string, expected *api.Bundle) func(*testing.T) { @@ -698,11 +699,11 @@ func testGetDefaultBundleThatProvides(addr string, expected *api.Bundle) func(*t func TestListBundles(t *testing.T) { t.Run("Sqlite", testListBundles(dbAddress, - etcdoperator_v0_9_2("alpha", true, false, includeManifestsNone), - etcdoperator_v0_9_2("stable", true, false, includeManifestsNone))) + etcdoperatorV0_9_2("alpha", true, false, includeManifestsNone), + etcdoperatorV0_9_2("stable", true, false, includeManifestsNone))) t.Run("FBCCache", testListBundles(cacheAddress, - etcdoperator_v0_9_2("alpha", true, true, includeManifestsNone), - etcdoperator_v0_9_2("stable", true, true, includeManifestsNone))) + etcdoperatorV0_9_2("alpha", true, true, includeManifestsNone), + etcdoperatorV0_9_2("stable", true, true, includeManifestsNone))) } func testListBundles(addr string, etcdAlpha *api.Bundle, etcdStable *api.Bundle) func(*testing.T) { @@ -747,7 +748,7 @@ func testListBundles(addr string, etcdAlpha *api.Bundle, etcdStable *api.Bundle) for { in, err := stream.Recv() - if err == io.EOF { + if errors.Is(err, io.EOF) { // read done. 
close(waitc) return @@ -826,7 +827,7 @@ const ( includeManifestsCSVOnly includeManifests = "csvOnly" ) -func etcdoperator_v0_9_2(channel string, addSkipsReplaces, addExtraProperties bool, includeManifests includeManifests) *api.Bundle { +func etcdoperatorV0_9_2(channel string, addSkipsReplaces, addExtraProperties bool, includeManifests includeManifests) *api.Bundle { b := &api.Bundle{ CsvName: "etcdoperator.v0.9.2", PackageName: "etcd", diff --git a/pkg/sqlite/configmap_test.go b/pkg/sqlite/configmap_test.go index aba816d3d..a09f22f4d 100644 --- a/pkg/sqlite/configmap_test.go +++ b/pkg/sqlite/configmap_test.go @@ -3,24 +3,28 @@ package sqlite import ( "bytes" "context" + "crypto/rand" "database/sql" "fmt" - "math/rand" + "math" + "math/big" "os" "strings" "testing" "github.com/sirupsen/logrus" "github.com/stretchr/testify/require" - v1 "k8s.io/api/core/v1" + corev1 "k8s.io/api/core/v1" "k8s.io/apimachinery/pkg/util/yaml" "github.com/operator-framework/operator-registry/pkg/api" "github.com/operator-framework/operator-registry/pkg/registry" ) -func CreateTestDb(t *testing.T) (*sql.DB, func()) { - dbName := fmt.Sprintf("test-%d.db", rand.Int()) +func CreateTestDB(t *testing.T) (*sql.DB, func()) { + r, err := rand.Int(rand.Reader, big.NewInt(math.MaxInt64)) + require.NoError(t, err) + dbName := fmt.Sprintf("test-%d.db", r.Int64()) db, err := Open(dbName) require.NoError(t, err) @@ -40,7 +44,7 @@ func CreateTestDb(t *testing.T) (*sql.DB, func()) { func TestConfigMapLoader(t *testing.T) { logrus.SetLevel(logrus.DebugLevel) - db, cleanup := CreateTestDb(t) + db, cleanup := CreateTestDB(t) defer cleanup() store, err := NewSQLLiteLoader(db) require.NoError(t, err) @@ -52,7 +56,7 @@ func TestConfigMapLoader(t *testing.T) { require.NoError(t, err, "unable to load configmap from file %s", path) decoder := yaml.NewYAMLOrJSONDecoder(fileReader, 30) - manifest := v1.ConfigMap{} + manifest := corev1.ConfigMap{} err = decoder.Decode(&manifest) require.NoError(t, err, "could not decode contents of file %s into configmap", path) @@ -63,7 +67,7 @@ func TestConfigMapLoader(t *testing.T) { func TestReplaceCycle(t *testing.T) { logrus.SetLevel(logrus.DebugLevel) - db, cleanup := CreateTestDb(t) + db, cleanup := CreateTestDB(t) defer cleanup() store, err := NewSQLLiteLoader(db) require.NoError(t, err) @@ -79,7 +83,7 @@ func TestReplaceCycle(t *testing.T) { []byte("replaces: etcdoperator.v0.9.2"), 1))) decoder := yaml.NewYAMLOrJSONDecoder(sReader, 30) - manifest := v1.ConfigMap{} + manifest := corev1.ConfigMap{} err = decoder.Decode(&manifest) require.NoError(t, err, "could not decode contents of file %s into configmap", path) @@ -89,7 +93,7 @@ func TestReplaceCycle(t *testing.T) { } func TestQuerierForConfigmap(t *testing.T) { - db, cleanup := CreateTestDb(t) + db, cleanup := CreateTestDB(t) defer cleanup() load, err := NewSQLLiteLoader(db) require.NoError(t, err) @@ -100,7 +104,7 @@ func TestQuerierForConfigmap(t *testing.T) { require.NoError(t, err, "unable to load configmap from file %s", path) decoder := yaml.NewYAMLOrJSONDecoder(fileReader, 30) - manifest := v1.ConfigMap{} + manifest := corev1.ConfigMap{} err = decoder.Decode(&manifest) require.NoError(t, err, "could not decode contents of file %s into configmap", path) @@ -186,7 +190,7 @@ func TestQuerierForConfigmap(t *testing.T) { etcdChannelEntries, err := store.GetChannelEntriesThatReplace(context.TODO(), "etcdoperator.v0.9.0") require.NoError(t, err) - require.ElementsMatch(t, []*registry.ChannelEntry{{"etcd", "alpha", "etcdoperator.v0.9.2", 
"etcdoperator.v0.9.0"}}, etcdChannelEntries) + require.ElementsMatch(t, []*registry.ChannelEntry{{PackageName: "etcd", ChannelName: "alpha", BundleName: "etcdoperator.v0.9.2", Replaces: "etcdoperator.v0.9.0"}}, etcdChannelEntries) etcdBundleByReplaces, err := store.GetBundleThatReplaces(context.TODO(), "etcdoperator.v0.9.0", "etcd", "alpha") require.NoError(t, err) @@ -195,17 +199,17 @@ func TestQuerierForConfigmap(t *testing.T) { etcdChannelEntriesThatProvide, err := store.GetChannelEntriesThatProvide(context.TODO(), "etcd.database.coreos.com", "v1beta2", "EtcdCluster") require.NoError(t, err) require.ElementsMatch(t, []*registry.ChannelEntry{ - {"etcd", "alpha", "etcdoperator.v0.6.1", ""}, - {"etcd", "alpha", "etcdoperator.v0.9.0", "etcdoperator.v0.6.1"}, - {"etcd", "alpha", "etcdoperator.v0.9.2", "etcdoperator.v0.9.0"}}, etcdChannelEntriesThatProvide) + {PackageName: "etcd", ChannelName: "alpha", BundleName: "etcdoperator.v0.6.1", Replaces: ""}, + {PackageName: "etcd", ChannelName: "alpha", BundleName: "etcdoperator.v0.9.0", Replaces: "etcdoperator.v0.6.1"}, + {PackageName: "etcd", ChannelName: "alpha", BundleName: "etcdoperator.v0.9.2", Replaces: "etcdoperator.v0.9.0"}}, etcdChannelEntriesThatProvide) etcdChannelEntriesThatProvideAPIServer, err := store.GetChannelEntriesThatProvide(context.TODO(), "etcd.database.coreos.com", "v1beta2", "FakeEtcdObject") require.NoError(t, err) - require.ElementsMatch(t, []*registry.ChannelEntry{{"etcd", "alpha", "etcdoperator.v0.9.0", "etcdoperator.v0.6.1"}}, etcdChannelEntriesThatProvideAPIServer) + require.ElementsMatch(t, []*registry.ChannelEntry{{PackageName: "etcd", ChannelName: "alpha", BundleName: "etcdoperator.v0.9.0", Replaces: "etcdoperator.v0.6.1"}}, etcdChannelEntriesThatProvideAPIServer) etcdLatestChannelEntriesThatProvide, err := store.GetLatestChannelEntriesThatProvide(context.TODO(), "etcd.database.coreos.com", "v1beta2", "EtcdCluster") require.NoError(t, err) - require.ElementsMatch(t, []*registry.ChannelEntry{{"etcd", "alpha", "etcdoperator.v0.9.2", "etcdoperator.v0.9.0"}}, etcdLatestChannelEntriesThatProvide) + require.ElementsMatch(t, []*registry.ChannelEntry{{PackageName: "etcd", ChannelName: "alpha", BundleName: "etcdoperator.v0.9.2", Replaces: "etcdoperator.v0.9.0"}}, etcdLatestChannelEntriesThatProvide) etcdBundleByProvides, err := store.GetBundleThatProvides(context.TODO(), "etcd.database.coreos.com", "v1beta2", "EtcdCluster") require.NoError(t, err) diff --git a/pkg/sqlite/conversion_test.go b/pkg/sqlite/conversion_test.go index 659fce484..077b95c6d 100644 --- a/pkg/sqlite/conversion_test.go +++ b/pkg/sqlite/conversion_test.go @@ -41,27 +41,27 @@ func TestToModel(t *testing.T) { require.NoError(t, err) require.NotNil(t, m) require.NoError(t, m.Validate()) - require.Equal(t, 3, len(m)) + require.Len(t, m, 3) require.Equal(t, "etcd", m["etcd"].Name) require.NotNil(t, m["etcd"].Icon) require.Equal(t, "alpha", m["etcd"].DefaultChannel.Name) - require.Equal(t, 3, len(m["etcd"].Channels)) - require.Equal(t, 3, len(m["etcd"].Channels["alpha"].Bundles)) - require.Equal(t, 2, len(m["etcd"].Channels["beta"].Bundles)) - require.Equal(t, 3, len(m["etcd"].Channels["stable"].Bundles)) + require.Len(t, m["etcd"].Channels, 3) + require.Len(t, m["etcd"].Channels["alpha"].Bundles, 3) + require.Len(t, m["etcd"].Channels["beta"].Bundles, 2) + require.Len(t, m["etcd"].Channels["stable"].Bundles, 3) require.Equal(t, "prometheus", m["prometheus"].Name) require.NotNil(t, m["prometheus"].Icon) require.Equal(t, "preview", 
m["prometheus"].DefaultChannel.Name) - require.Equal(t, 1, len(m["prometheus"].Channels)) - require.Equal(t, 3, len(m["prometheus"].Channels["preview"].Bundles)) + require.Len(t, m["prometheus"].Channels, 1) + require.Len(t, m["prometheus"].Channels["preview"].Bundles, 3) require.Equal(t, "strimzi-kafka-operator", m["strimzi-kafka-operator"].Name) require.NotNil(t, m["strimzi-kafka-operator"].Icon) require.Equal(t, "stable", m["strimzi-kafka-operator"].DefaultChannel.Name) - require.Equal(t, 3, len(m["strimzi-kafka-operator"].Channels)) - require.Equal(t, 4, len(m["strimzi-kafka-operator"].Channels["alpha"].Bundles)) - require.Equal(t, 3, len(m["strimzi-kafka-operator"].Channels["beta"].Bundles)) - require.Equal(t, 2, len(m["strimzi-kafka-operator"].Channels["stable"].Bundles)) + require.Len(t, m["strimzi-kafka-operator"].Channels, 3) + require.Len(t, m["strimzi-kafka-operator"].Channels["alpha"].Bundles, 4) + require.Len(t, m["strimzi-kafka-operator"].Channels["beta"].Bundles, 3) + require.Len(t, m["strimzi-kafka-operator"].Channels["stable"].Bundles, 2) } diff --git a/pkg/sqlite/directory_test.go b/pkg/sqlite/directory_test.go index 070fe178c..e2320fdc7 100644 --- a/pkg/sqlite/directory_test.go +++ b/pkg/sqlite/directory_test.go @@ -17,7 +17,7 @@ import ( func TestDirectoryLoader(t *testing.T) { logrus.SetLevel(logrus.DebugLevel) - db, cleanup := CreateTestDb(t) + db, cleanup := CreateTestDB(t) defer cleanup() store, err := NewSQLLiteLoader(db) require.NoError(t, err) @@ -30,7 +30,7 @@ func TestDirectoryLoader(t *testing.T) { func TestDirectoryLoaderWithBadPackageData(t *testing.T) { logrus.SetLevel(logrus.DebugLevel) - db, cleanup := CreateTestDb(t) + db, cleanup := CreateTestDB(t) defer cleanup() store, err := NewSQLLiteLoader(db) require.NoError(t, err) @@ -47,7 +47,7 @@ func TestDirectoryLoaderWithBadPackageData(t *testing.T) { pkg := new(registry.PackageManifest) require.NoError(t, yaml.NewDecoder(r).Decode(pkg)) - require.True(t, len(pkg.Channels) > 1) + require.Greater(t, len(pkg.Channels), 1) pkg.Channels[0].CurrentCSVName = "imaginary" // Replace file contents @@ -63,7 +63,7 @@ func TestDirectoryLoaderWithBadPackageData(t *testing.T) { func TestDirectoryLoaderWithBadBundleData(t *testing.T) { logrus.SetLevel(logrus.DebugLevel) - db, cleanup := CreateTestDb(t) + db, cleanup := CreateTestDB(t) defer cleanup() store, err := NewSQLLiteLoader(db) require.NoError(t, err) @@ -77,7 +77,7 @@ func TestDirectoryLoaderWithBadBundleData(t *testing.T) { } func TestQuerierForDirectory(t *testing.T) { - db, cleanup := CreateTestDb(t) + db, cleanup := CreateTestDB(t) defer cleanup() load, err := NewSQLLiteLoader(db) require.NoError(t, err) @@ -192,6 +192,7 @@ func TestQuerierForDirectory(t *testing.T) { EqualBundles(t, *expectedBundle, *etcdBundleByReplaces) etcdChannelEntriesThatProvide, err := store.GetChannelEntriesThatProvide(context.TODO(), "etcd.database.coreos.com", "v1beta2", "EtcdCluster") + require.NoError(t, err) for _, c := range etcdChannelEntriesThatProvide { t.Logf("%#v", c) } diff --git a/pkg/sqlite/graphloader_test.go b/pkg/sqlite/graphloader_test.go index f6c23ca6d..0292a44c8 100644 --- a/pkg/sqlite/graphloader_test.go +++ b/pkg/sqlite/graphloader_test.go @@ -10,8 +10,8 @@ import ( "github.com/operator-framework/operator-registry/pkg/registry" ) -func createLoadedTestDb(t *testing.T) (*sql.DB, func()) { - db, cleanup := CreateTestDb(t) +func createLoadedTestDB(t *testing.T) (*sql.DB, func()) { + db, cleanup := CreateTestDB(t) store, err := NewSQLLiteLoader(db) 
require.NoError(t, err) require.NoError(t, store.Migrate(context.TODO())) @@ -65,7 +65,7 @@ func TestLoadPackageGraph_Etcd(t *testing.T) { }, } - db, cleanup := createLoadedTestDb(t) + db, cleanup := createLoadedTestDB(t) defer cleanup() graphLoader, err := NewSQLGraphLoaderFromDB(db) @@ -75,7 +75,7 @@ func TestLoadPackageGraph_Etcd(t *testing.T) { require.NoError(t, err) require.Equal(t, "etcd", result.Name) - require.Equal(t, 3, len(result.Channels)) + require.Len(t, result.Channels, 3) for channelName, channel := range result.Channels { expectedChannel := expectedGraph.Channels[channelName] @@ -85,7 +85,7 @@ func TestLoadPackageGraph_Etcd(t *testing.T) { } func TestLoadPackageGraph_Etcd_NotFound(t *testing.T) { - db, cleanup := createLoadedTestDb(t) + db, cleanup := createLoadedTestDB(t) defer cleanup() graphLoader, err := NewSQLGraphLoaderFromDB(db) diff --git a/pkg/sqlite/load_test.go b/pkg/sqlite/load_test.go index 35f9441f0..27c17c53f 100644 --- a/pkg/sqlite/load_test.go +++ b/pkg/sqlite/load_test.go @@ -129,7 +129,7 @@ func TestAddPackageChannels(t *testing.T) { for _, tt := range tests { t.Run(tt.description, func(t *testing.T) { - db, cleanup := CreateTestDb(t) + db, cleanup := CreateTestDB(t) defer cleanup() store, err := NewSQLLiteLoader(db) require.NoError(t, err) @@ -138,7 +138,7 @@ func TestAddPackageChannels(t *testing.T) { for _, bundle := range tt.fields.bundles { // Throw away any errors loading bundles (not testing this) - store.AddOperatorBundle(bundle) + _ = store.AddOperatorBundle(bundle) } for i, pkg := range tt.args.pkgs { @@ -157,7 +157,7 @@ func TestAddPackageChannels(t *testing.T) { func TestAddBundleSemver(t *testing.T) { // Create a test DB - db, cleanup := CreateTestDb(t) + db, cleanup := CreateTestDB(t) defer cleanup() store, err := NewSQLLiteLoader(db) require.NoError(t, err) @@ -210,7 +210,7 @@ func TestAddBundleSemver(t *testing.T) { if b.PackageName != "pkg-0" { continue } - require.Len(t, b.Skips, 0, "unexpected skips value(s) for bundle %q", b.CsvName) + require.Empty(t, b.Skips, "unexpected skips value(s) for bundle %q", b.CsvName) replaces[b.CsvName] = b.Replaces } require.Equal(t, map[string]string{ @@ -221,7 +221,7 @@ } func TestClearNonHeadBundles(t *testing.T) { - db, cleanup := CreateTestDb(t) + db, cleanup := CreateTestDB(t) defer cleanup() store, err := NewSQLLiteLoader(db) require.NoError(t, err) @@ -307,8 +307,8 @@ func newUnstructuredCSVWithVersion(t *testing.T, name, version string) *unstruct csv := &registry.ClusterServiceVersion{} csv.TypeMeta.Kind = "ClusterServiceVersion" csv.SetName(name) - versionJson := fmt.Sprintf(`{"version": "%s"}`, version) - csv.Spec = json.RawMessage(versionJson) + versionJSON := fmt.Sprintf(`{"version": "%s"}`, version) + csv.Spec = json.RawMessage(versionJSON) out, err := runtime.DefaultUnstructuredConverter.ToUnstructured(csv) require.NoError(t, err) @@ -329,7 +329,7 @@ func newBundle(t *testing.T, name, pkgName string, channels []string, objs ...*u } func TestRMBundle(t *testing.T) { - db, cleanup := CreateTestDb(t) + db, cleanup := CreateTestDB(t) defer cleanup() store, err := NewSQLLiteLoader(db) require.NoError(t, err) @@ -600,7 +600,7 @@ func TestDeprecationAwareLoader(t *testing.T) { for _, tt := range tests { t.Run(tt.description, func(t *testing.T) { - db, cleanup := CreateTestDb(t) + db, cleanup := CreateTestDB(t) defer cleanup() store, err := NewDeprecationAwareLoader(db) require.NoError(t, err) @@ -643,7 +643,7 @@ func TestDeprecationAwareLoader(t *testing.T)
{ delete(bundleMap, bundleName) } - require.Len(t, bundleMap, 0, "not all expected bundles exist in %s table: %v", table, bundleMap) + require.Empty(t, bundleMap, "not all expected bundles exist in %s table: %v", table, bundleMap) } checkForBundles(`SELECT operatorbundle_name FROM deprecated`, "deprecated", tt.expected.deprecated) // operatorbundle_name: @@ -868,7 +868,7 @@ func TestGetTailFromBundle(t *testing.T) { for _, tt := range tests { t.Run(tt.description, func(t *testing.T) { - db, cleanup := CreateTestDb(t) + db, cleanup := CreateTestDB(t) defer cleanup() store, err := NewSQLLiteLoader(db) require.NoError(t, err) @@ -992,7 +992,7 @@ func TestAddBundlePropertiesFromAnnotations(t *testing.T) { }, } { t.Run(tt.description, func(t *testing.T) { - db, cleanup := CreateTestDb(t) + db, cleanup := CreateTestDB(t) defer cleanup() s, err := NewSQLLiteLoader(db) @@ -1210,7 +1210,7 @@ func TestRemoveOverwrittenChannelHead(t *testing.T) { } for _, tt := range tests { t.Run(tt.description, func(t *testing.T) { - db, cleanup := CreateTestDb(t) + db, cleanup := CreateTestDB(t) defer cleanup() store, err := NewSQLLiteLoader(db) require.NoError(t, err) @@ -1219,12 +1219,12 @@ func TestRemoveOverwrittenChannelHead(t *testing.T) { for _, bundle := range tt.fields.bundles { // Throw away any errors loading bundles (not testing this) - store.AddOperatorBundle(bundle) + _ = store.AddOperatorBundle(bundle) } for _, pkg := range tt.fields.pkgs { // Throw away any errors loading packages (not testing this) - store.AddPackageChannels(pkg) + _ = store.AddPackageChannels(pkg) } getDefaultChannel := func(pkg string) sql.NullString { diff --git a/pkg/sqlite/migrations/001_related_images_test.go b/pkg/sqlite/migrations/001_related_images_test.go index 495c7b46f..c9336cf9b 100644 --- a/pkg/sqlite/migrations/001_related_images_test.go +++ b/pkg/sqlite/migrations/001_related_images_test.go @@ -2,12 +2,13 @@ package migrations_test import ( "context" + "crypto/rand" "database/sql" "fmt" - "math/rand" + "math" + "math/big" "os" "testing" - "time" "github.com/sirupsen/logrus" "github.com/stretchr/testify/require" @@ -16,12 +17,11 @@ import ( "github.com/operator-framework/operator-registry/pkg/sqlite/migrations" ) -func init() { - rand.Seed(time.Now().UTC().UnixNano()) -} +func CreateTestDBAt(t *testing.T, key int) (*sql.DB, sqlite.Migrator, func()) { + r, err := rand.Int(rand.Reader, big.NewInt(math.MaxInt64)) + require.NoError(t, err) -func CreateTestDbAt(t *testing.T, key int) (*sql.DB, sqlite.Migrator, func()) { - dbName := fmt.Sprintf("%d.db", rand.Int()) + dbName := fmt.Sprintf("%d.db", r) logrus.SetLevel(logrus.DebugLevel) db, err := sqlite.Open(dbName) @@ -48,7 +48,7 @@ func CreateTestDbAt(t *testing.T, key int) (*sql.DB, sqlite.Migrator, func()) { func TestRelatedImagesUp(t *testing.T) { // migrate up to, but not including, this migration - db, migrator, cleanup := CreateTestDbAt(t, migrations.RelatedImagesMigrationKey-1) + db, migrator, cleanup := CreateTestDBAt(t, migrations.RelatedImagesMigrationKey-1) defer cleanup() // Add a test bundle without extracting related_images @@ -78,7 +78,7 @@ func TestRelatedImagesUp(t *testing.T) { } func TestRelatedImagesDown(t *testing.T) { - db, migrator, cleanup := CreateTestDbAt(t, migrations.RelatedImagesMigrationKey) + db, migrator, cleanup := CreateTestDBAt(t, migrations.RelatedImagesMigrationKey) defer cleanup() // Add a test bundle that has related images diff --git a/pkg/sqlite/migrations/002_bundle_path_test.go b/pkg/sqlite/migrations/002_bundle_path_test.go 
index 469a3c996..cf081a86a 100644 --- a/pkg/sqlite/migrations/002_bundle_path_test.go +++ b/pkg/sqlite/migrations/002_bundle_path_test.go @@ -11,7 +11,7 @@ import ( ) func TestBundlePathUp(t *testing.T) { - db, migrator, cleanup := CreateTestDbAt(t, migrations.BundlePathMigrationKey-1) + db, migrator, cleanup := CreateTestDBAt(t, migrations.BundlePathMigrationKey-1) defer cleanup() err := migrator.Up(context.TODO(), migrations.Only(migrations.BundlePathMigrationKey)) @@ -19,6 +19,7 @@ func TestBundlePathUp(t *testing.T) { // Adding row with bundlepath column should not fail after migrating up tx, err := db.Begin() + require.NoError(t, err) stmt, err := tx.Prepare("insert into operatorbundle(name, csv, bundle, bundlepath) values(?, ?, ?, ?)") require.NoError(t, err) defer stmt.Close() @@ -28,16 +29,18 @@ func TestBundlePathUp(t *testing.T) { } func TestBundlePathDown(t *testing.T) { - db, migrator, cleanup := CreateTestDbAt(t, migrations.BundlePathMigrationKey) + db, migrator, cleanup := CreateTestDBAt(t, migrations.BundlePathMigrationKey) defer cleanup() querier := sqlite.NewSQLLiteQuerierFromDb(db) imagesBeforeMigration, err := querier.GetImagesForBundle(context.TODO(), "etcdoperator.v0.6.1") + require.NoError(t, err) err = migrator.Down(context.TODO(), migrations.Only(migrations.BundlePathMigrationKey)) require.NoError(t, err) imagesAfterMigration, err := querier.GetImagesForBundle(context.TODO(), "etcdoperator.v0.6.1") + require.NoError(t, err) // Migrating down entails sensitive operations. Ensure data is preserved across down migration require.Equal(t, len(imagesBeforeMigration), len(imagesAfterMigration)) diff --git a/pkg/sqlite/migrations/003_required_apis_test.go b/pkg/sqlite/migrations/003_required_apis_test.go index 6d464160f..415315160 100644 --- a/pkg/sqlite/migrations/003_required_apis_test.go +++ b/pkg/sqlite/migrations/003_required_apis_test.go @@ -12,7 +12,7 @@ import ( func TestRequiredApisUp(t *testing.T) { // migrate up to, but not including, this migration - db, migrator, cleanup := CreateTestDbAt(t, migrations.RequiredApiMigrationKey-1) + db, migrator, cleanup := CreateTestDBAt(t, migrations.RequiredApiMigrationKey-1) defer cleanup() _, err := db.Exec(`PRAGMA foreign_keys = 0`) @@ -54,15 +54,15 @@ func TestRequiredApisUp(t *testing.T) { var plural sql.NullString rows.Next() require.NoError(t, rows.Scan(&group, &version, &kind, &plural)) - require.Equal(t, group.String, "etcd.database.coreos.com") - require.Equal(t, version.String, "v1beta2") - require.Equal(t, kind.String, "EtcdCluster") - require.Equal(t, plural.String, "etcdclusters") + require.Equal(t, "etcd.database.coreos.com", group.String) + require.Equal(t, "v1beta2", version.String) + require.Equal(t, "EtcdCluster", kind.String) + require.Equal(t, "etcdclusters", plural.String) require.NoError(t, rows.Close()) } func TestRequiredApisDown(t *testing.T) { - db, migrator, cleanup := CreateTestDbAt(t, migrations.RequiredApiMigrationKey) + db, migrator, cleanup := CreateTestDBAt(t, migrations.RequiredApiMigrationKey) defer cleanup() // Add a required api @@ -86,10 +86,10 @@ func TestRequiredApisDown(t *testing.T) { var plural sql.NullString rows.Next() require.NoError(t, rows.Scan(&group, &version, &kind, &plural)) - require.Equal(t, group.String, "etcd.database.coreos.com") - require.Equal(t, version.String, "v1beta2") - require.Equal(t, kind.String, "EtcdCluster") - require.Equal(t, plural.String, "etcdclusters") + require.Equal(t, "etcd.database.coreos.com", group.String) + require.Equal(t, "v1beta2",
version.String) + require.Equal(t, "EtcdCluster", kind.String) + require.Equal(t, "etcdclusters", plural.String) require.NoError(t, rows.Close()) // run down migration diff --git a/pkg/sqlite/migrations/004_cascade_delete_test.go b/pkg/sqlite/migrations/004_cascade_delete_test.go index 7193568a0..bc2bb7398 100644 --- a/pkg/sqlite/migrations/004_cascade_delete_test.go +++ b/pkg/sqlite/migrations/004_cascade_delete_test.go @@ -13,19 +13,18 @@ import ( func TestBeforeCascadeDeleteUp(t *testing.T) { // migrate up to, but not including, this migration - db, _, cleanup := CreateTestDbAt(t, migrations.CascadeDeleteMigrationKey-1) + db, _, cleanup := CreateTestDBAt(t, migrations.CascadeDeleteMigrationKey-1) defer cleanup() tx, err := db.Begin() require.NoError(t, err) - err = checkMigrationInPreviousState(t, tx) - require.NoError(t, err) + checkMigrationInPreviousState(t, tx) } func TestAfterCascadeDeleteUp(t *testing.T) { // migrate up to, but not including, this migration - db, migrator, cleanup := CreateTestDbAt(t, migrations.CascadeDeleteMigrationKey-1) + db, migrator, cleanup := CreateTestDBAt(t, migrations.CascadeDeleteMigrationKey-1) defer cleanup() // run up migration @@ -35,23 +34,21 @@ func TestAfterCascadeDeleteUp(t *testing.T) { tx, err := db.Begin() require.NoError(t, err) - err = checkMigrationInNextState(t, tx) - require.NoError(t, err) + checkMigrationInNextState(t, tx) } func TestBeforeCascadeDeleteDown(t *testing.T) { - db, _, cleanup := CreateTestDbAt(t, migrations.CascadeDeleteMigrationKey) + db, _, cleanup := CreateTestDBAt(t, migrations.CascadeDeleteMigrationKey) defer cleanup() tx, err := db.Begin() require.NoError(t, err) - err = checkMigrationInNextState(t, tx) - require.NoError(t, err) + checkMigrationInNextState(t, tx) } func TestAferCascadeDeleteDown(t *testing.T) { - db, migrator, cleanup := CreateTestDbAt(t, migrations.CascadeDeleteMigrationKey) + db, migrator, cleanup := CreateTestDBAt(t, migrations.CascadeDeleteMigrationKey) defer cleanup() // run down migration @@ -61,8 +58,7 @@ func TestAferCascadeDeleteDown(t *testing.T) { tx, err := db.Begin() require.NoError(t, err) - err = checkMigrationInPreviousState(t, tx) - require.NoError(t, err) + checkMigrationInPreviousState(t, tx) } func removeWhiteSpaces(s string) string { @@ -73,7 +69,7 @@ func removeWhiteSpaces(s string) string { return s } -func checkMigrationInPreviousState(t *testing.T, tx *sql.Tx) error { +func checkMigrationInPreviousState(t *testing.T, tx *sql.Tx) { getCreateTableStatement := func(table string) string { return `SELECT sql FROM sqlite_master where name="` + table + `"` } @@ -130,6 +126,7 @@ func checkMigrationInPreviousState(t *testing.T, tx *sql.Tx) error { var createStatement string table, err := tx.Query(getCreateTableStatement("operatorbundle")) + require.NoError(t, err) hasRows := table.Next() require.True(t, hasRows) err = table.Scan(&createStatement) @@ -139,6 +136,7 @@ func checkMigrationInPreviousState(t *testing.T, tx *sql.Tx) error { require.NoError(t, err) table, err = tx.Query(getCreateTableStatement("package")) + require.NoError(t, err) hasRows = table.Next() require.True(t, hasRows) err = table.Scan(&createStatement) @@ -148,6 +146,7 @@ func checkMigrationInPreviousState(t *testing.T, tx *sql.Tx) error { require.NoError(t, err) table, err = tx.Query(getCreateTableStatement("channel")) + require.NoError(t, err) hasRows = table.Next() require.True(t, hasRows) err = table.Scan(&createStatement) @@ -157,6 +156,7 @@ func checkMigrationInPreviousState(t *testing.T, tx *sql.Tx) error { 
require.NoError(t, err) table, err = tx.Query(getCreateTableStatement("channel_entry")) + require.NoError(t, err) hasRows = table.Next() require.True(t, hasRows) err = table.Scan(&createStatement) @@ -166,6 +166,7 @@ func checkMigrationInPreviousState(t *testing.T, tx *sql.Tx) error { require.NoError(t, err) table, err = tx.Query(getCreateTableStatement("api_requirer")) + require.NoError(t, err) hasRows = table.Next() require.True(t, hasRows) err = table.Scan(&createStatement) @@ -175,6 +176,7 @@ func checkMigrationInPreviousState(t *testing.T, tx *sql.Tx) error { require.NoError(t, err) table, err = tx.Query(getCreateTableStatement("api_provider")) + require.NoError(t, err) hasRows = table.Next() require.True(t, hasRows) err = table.Scan(&createStatement) @@ -184,6 +186,7 @@ func checkMigrationInPreviousState(t *testing.T, tx *sql.Tx) error { require.NoError(t, err) table, err = tx.Query(getCreateTableStatement("related_image")) + require.NoError(t, err) hasRows = table.Next() require.True(t, hasRows) err = table.Scan(&createStatement) @@ -191,11 +194,9 @@ func checkMigrationInPreviousState(t *testing.T, tx *sql.Tx) error { require.Equal(t, removeWhiteSpaces(createNewRelatedImageTable), removeWhiteSpaces(createStatement)) err = table.Close() require.NoError(t, err) - - return nil } -func checkMigrationInNextState(t *testing.T, tx *sql.Tx) error { +func checkMigrationInNextState(t *testing.T, tx *sql.Tx) { getCreateTableStatement := func(table string) string { return `SELECT sql FROM sqlite_master where name="` + table + `"` } @@ -252,6 +253,7 @@ func checkMigrationInNextState(t *testing.T, tx *sql.Tx) error { var createStatement string table, err := tx.Query(getCreateTableStatement("operatorbundle")) + require.NoError(t, err) hasRows := table.Next() require.True(t, hasRows) err = table.Scan(&createStatement) @@ -261,6 +263,7 @@ func checkMigrationInNextState(t *testing.T, tx *sql.Tx) error { require.NoError(t, err) table, err = tx.Query(getCreateTableStatement("package")) + require.NoError(t, err) hasRows = table.Next() require.True(t, hasRows) err = table.Scan(&createStatement) @@ -270,6 +273,7 @@ func checkMigrationInNextState(t *testing.T, tx *sql.Tx) error { require.NoError(t, err) table, err = tx.Query(getCreateTableStatement("channel")) + require.NoError(t, err) hasRows = table.Next() require.True(t, hasRows) err = table.Scan(&createStatement) @@ -279,6 +283,7 @@ func checkMigrationInNextState(t *testing.T, tx *sql.Tx) error { require.NoError(t, err) table, err = tx.Query(getCreateTableStatement("channel_entry")) + require.NoError(t, err) hasRows = table.Next() require.True(t, hasRows) err = table.Scan(&createStatement) @@ -288,6 +293,7 @@ func checkMigrationInNextState(t *testing.T, tx *sql.Tx) error { require.NoError(t, err) table, err = tx.Query(getCreateTableStatement("api_requirer")) + require.NoError(t, err) hasRows = table.Next() require.True(t, hasRows) err = table.Scan(&createStatement) @@ -297,6 +303,7 @@ func checkMigrationInNextState(t *testing.T, tx *sql.Tx) error { require.NoError(t, err) table, err = tx.Query(getCreateTableStatement("api_provider")) + require.NoError(t, err) hasRows = table.Next() require.True(t, hasRows) err = table.Scan(&createStatement) @@ -306,6 +313,7 @@ func checkMigrationInNextState(t *testing.T, tx *sql.Tx) error { require.NoError(t, err) table, err = tx.Query(getCreateTableStatement("related_image")) + require.NoError(t, err) hasRows = table.Next() require.True(t, hasRows) err = table.Scan(&createStatement) @@ -313,6 +321,4 @@ func 
checkMigrationInNextState(t *testing.T, tx *sql.Tx) error { require.Equal(t, removeWhiteSpaces(createNewRelatedImageTable), removeWhiteSpaces(createStatement)) err = table.Close() require.NoError(t, err) - - return nil } diff --git a/pkg/sqlite/migrations/005_version_skiprange_test.go b/pkg/sqlite/migrations/005_version_skiprange_test.go index 6e4bf1423..01e168497 100644 --- a/pkg/sqlite/migrations/005_version_skiprange_test.go +++ b/pkg/sqlite/migrations/005_version_skiprange_test.go @@ -11,7 +11,7 @@ import ( ) func TestVersioningUp(t *testing.T) { - db, migrator, cleanup := CreateTestDbAt(t, migrations.VersionSkipRangeMigrationKey-1) + db, migrator, cleanup := CreateTestDBAt(t, migrations.VersionSkipRangeMigrationKey-1) defer cleanup() err := migrator.Up(context.TODO(), migrations.Only(migrations.VersionSkipRangeMigrationKey)) @@ -29,7 +29,7 @@ func TestVersioningUp(t *testing.T) { } func TestVersioningDown(t *testing.T) { - db, migrator, cleanup := CreateTestDbAt(t, migrations.VersionSkipRangeMigrationKey) + db, migrator, cleanup := CreateTestDBAt(t, migrations.VersionSkipRangeMigrationKey) defer cleanup() // Add a bundle without extracting required_apis @@ -57,11 +57,11 @@ func TestVersioningDown(t *testing.T) { var skipRange sql.NullString rows.Next() require.NoError(t, rows.Scan(&name, &csv, &bundle, &version, &skipRange)) - require.Equal(t, name.String, "etcdoperator.v0.6.1") + require.Equal(t, "etcdoperator.v0.6.1", name.String) require.Equal(t, csv.String, testCSV) require.Equal(t, bundle.String, testBundle) - require.Equal(t, version.String, "0.6.1") - require.Equal(t, skipRange.String, ">0.5.0 <0.6.1") + require.Equal(t, "0.6.1", version.String) + require.Equal(t, ">0.5.0 <0.6.1", skipRange.String) require.NoError(t, rows.Close()) // run down migration @@ -73,7 +73,7 @@ func TestVersioningDown(t *testing.T) { require.NoError(t, err) rows.Next() require.NoError(t, rows.Scan(&name, &csv, &bundle)) - require.Equal(t, name.String, "etcdoperator.v0.6.1") + require.Equal(t, "etcdoperator.v0.6.1", name.String) require.Equal(t, csv.String, testCSV) require.Equal(t, bundle.String, testBundle) require.NoError(t, rows.Close()) diff --git a/pkg/sqlite/migrations/006_associate_apis_with_bundle_test.go b/pkg/sqlite/migrations/006_associate_apis_with_bundle_test.go index dc036de4b..bd815ad1a 100644 --- a/pkg/sqlite/migrations/006_associate_apis_with_bundle_test.go +++ b/pkg/sqlite/migrations/006_associate_apis_with_bundle_test.go @@ -13,7 +13,7 @@ import ( ) func TestAssociateApisWithBundleUp(t *testing.T) { - db, migrator, cleanup := CreateTestDbAt(t, migrations.AssociateApisWithBundleMigrationKey-1) + db, migrator, cleanup := CreateTestDBAt(t, migrations.AssociateApisWithBundleMigrationKey-1) defer cleanup() _, err := db.Exec(`PRAGMA foreign_keys = 0`) @@ -32,7 +32,7 @@ func TestAssociateApisWithBundleUp(t *testing.T) { require.NoError(t, err) result, err := tx.Exec("insert into channel_entry(channel_name, package_name, operatorbundle_name, depth) values(?, ?, ?, ?)", "alpha", "etcd", "etcdoperator.v0.6.1", 0) require.NoError(t, err) - entry_id, err := result.LastInsertId() + entryID, err := result.LastInsertId() require.NoError(t, err) _, err = tx.Exec("insert into api(group_name, version, kind, plural) values(?, ?, ?, ?)", "etcd.database.coreos.com", "v1alpha1", "EtcdClusters", "etcdclusters") require.NoError(t, err) @@ -40,11 +40,11 @@ func TestAssociateApisWithBundleUp(t *testing.T) { require.NoError(t, err) _, err = tx.Exec("insert into api(group_name, version, kind, plural) values(?, 
?, ?, ?)", "etcd.database.coreos.com", "v1alpha1", "EtcdRestores", "etcdrestores") require.NoError(t, err) - _, err = tx.Exec("insert into api_provider(group_name, version, kind, channel_entry_id) values(?, ?, ?, ?)", "etcd.database.coreos.com", "v1alpha1", "EtcdClusters", entry_id) + _, err = tx.Exec("insert into api_provider(group_name, version, kind, channel_entry_id) values(?, ?, ?, ?)", "etcd.database.coreos.com", "v1alpha1", "EtcdClusters", entryID) require.NoError(t, err) - _, err = tx.Exec("insert into api_provider(group_name, version, kind, channel_entry_id) values(?, ?, ?, ?)", "etcd.database.coreos.com", "v1alpha1", "EtcdBackups", entry_id) + _, err = tx.Exec("insert into api_provider(group_name, version, kind, channel_entry_id) values(?, ?, ?, ?)", "etcd.database.coreos.com", "v1alpha1", "EtcdBackups", entryID) require.NoError(t, err) - _, err = tx.Exec("insert into api_provider(group_name, version, kind, channel_entry_id) values(?, ?, ?, ?)", "etcd.database.coreos.com", "v1alpha1", "EtcdRestores", entry_id) + _, err = tx.Exec("insert into api_provider(group_name, version, kind, channel_entry_id) values(?, ?, ?, ?)", "etcd.database.coreos.com", "v1alpha1", "EtcdRestores", entryID) require.NoError(t, err) require.NoError(t, tx.Commit()) _, err = db.Exec(`PRAGMA foreign_keys = 1`) @@ -70,7 +70,7 @@ func TestAssociateApisWithBundleUp(t *testing.T) { } func TestAssociateApisWithBundleDown(t *testing.T) { - db, migrator, cleanup := CreateTestDbAt(t, migrations.AssociateApisWithBundleMigrationKey) + db, migrator, cleanup := CreateTestDBAt(t, migrations.AssociateApisWithBundleMigrationKey) defer cleanup() _, err := db.Exec(`PRAGMA foreign_keys = 0`) @@ -106,6 +106,7 @@ func TestAssociateApisWithBundleDown(t *testing.T) { require.NoError(t, err) entriesBeforeMigration, err := newGetChannelEntriesThatProvide(db, "etcd.database.coreos.com", "v1alpha1", "EtcdRestores") + require.NoError(t, err) err = migrator.Down(context.TODO(), migrations.Only(migrations.AssociateApisWithBundleMigrationKey)) require.NoError(t, err) @@ -117,7 +118,7 @@ func TestAssociateApisWithBundleDown(t *testing.T) { require.EqualValues(t, entriesBeforeMigration, entriesAfterMigration) } -func oldGetChannelEntriesThatProvide(db *sql.DB, group, version, kind string) (entries []*registry.ChannelEntry, err error) { +func oldGetChannelEntriesThatProvide(db *sql.DB, group, version, kind string) ([]*registry.ChannelEntry, error) { query := `SELECT DISTINCT channel_entry.package_name, channel_entry.channel_name, channel_entry.operatorbundle_name, replaces.operatorbundle_name FROM channel_entry INNER JOIN api_provider ON channel_entry.entry_id = api_provider.channel_entry_id @@ -126,11 +127,11 @@ func oldGetChannelEntriesThatProvide(db *sql.DB, group, version, kind string) (e rows, err := db.Query(query, group, version, kind) if err != nil { - return + return nil, err } defer rows.Close() - entries = []*registry.ChannelEntry{} + var entries = []*registry.ChannelEntry{} for rows.Next() { var pkgNameSQL sql.NullString @@ -138,7 +139,7 @@ func oldGetChannelEntriesThatProvide(db *sql.DB, group, version, kind string) (e var bundleNameSQL sql.NullString var replacesSQL sql.NullString if err = rows.Scan(&pkgNameSQL, &channelNameSQL, &bundleNameSQL, &replacesSQL); err != nil { - return + return nil, err } entries = append(entries, &registry.ChannelEntry{ @@ -150,12 +151,12 @@ func oldGetChannelEntriesThatProvide(db *sql.DB, group, version, kind string) (e } if len(entries) == 0 { err = fmt.Errorf("no channel entries found that provide
%s %s %s", group, version, kind) - return + return nil, err } - return + return entries, nil } -func newGetChannelEntriesThatProvide(db *sql.DB, group, version, kind string) (entries []*registry.ChannelEntry, err error) { +func newGetChannelEntriesThatProvide(db *sql.DB, group, version, kind string) ([]*registry.ChannelEntry, error) { query := `SELECT DISTINCT channel_entry.package_name, channel_entry.channel_name, channel_entry.operatorbundle_name, replaces.operatorbundle_name FROM channel_entry INNER JOIN api_provider ON channel_entry.operatorbundle_name = api_provider.operatorbundle_name @@ -164,11 +165,11 @@ func newGetChannelEntriesThatProvide(db *sql.DB, group, version, kind string) (e rows, err := db.Query(query, group, version, kind) if err != nil { - return + return nil, err } defer rows.Close() - entries = []*registry.ChannelEntry{} + var entries = []*registry.ChannelEntry{} for rows.Next() { var pkgNameSQL sql.NullString @@ -176,7 +177,7 @@ func newGetChannelEntriesThatProvide(db *sql.DB, group, version, kind string) (e var bundleNameSQL sql.NullString var replacesSQL sql.NullString if err = rows.Scan(&pkgNameSQL, &channelNameSQL, &bundleNameSQL, &replacesSQL); err != nil { - return + return nil, err } entries = append(entries, &registry.ChannelEntry{ @@ -188,7 +189,7 @@ func newGetChannelEntriesThatProvide(db *sql.DB, group, version, kind string) (e } if len(entries) == 0 { err = fmt.Errorf("no channel entries found that provide %s %s %s", group, version, kind) - return + return nil, err } - return + return entries, nil } diff --git a/pkg/sqlite/migrations/007_replaces_skips_test.go b/pkg/sqlite/migrations/007_replaces_skips_test.go index 422850dca..1b64b737d 100644 --- a/pkg/sqlite/migrations/007_replaces_skips_test.go +++ b/pkg/sqlite/migrations/007_replaces_skips_test.go @@ -11,7 +11,7 @@ import ( ) func TestReplacesSkipsUp(t *testing.T) { - db, migrator, cleanup := CreateTestDbAt(t, migrations.ReplacesSkipsMigrationKey-1) + db, migrator, cleanup := CreateTestDBAt(t, migrations.ReplacesSkipsMigrationKey-1) defer cleanup() _, err := db.Exec(`PRAGMA foreign_keys = 0`) @@ -45,7 +45,7 @@ func TestReplacesSkipsUp(t *testing.T) { _, err = tx.Exec("insert into channel(name, package_name, head_operatorbundle_name) values(?,?,?)", "stable", "etcd", "etcdoperator.v0.9.2") require.NoError(t, err) - channel_entries := ` + channelEntries := ` INSERT INTO "main"."channel_entry" ("entry_id", "channel_name", "package_name", "operatorbundle_name", "replaces", "depth") VALUES ('1', 'alpha', 'etcd', 'etcdoperator.v0.9.2', '4', '0'); INSERT INTO "main"."channel_entry" ("entry_id", "channel_name", "package_name", "operatorbundle_name", "replaces", "depth") VALUES ('2', 'alpha', 'etcd', 'etcdoperator.v0.9.1', '', '1'); INSERT INTO "main"."channel_entry" ("entry_id", "channel_name", "package_name", "operatorbundle_name", "replaces", "depth") VALUES ('3', 'alpha', 'etcd', 'etcdoperator.v0.9.2', '2', '1'); @@ -76,7 +76,7 @@ func TestReplacesSkipsUp(t *testing.T) { INSERT INTO "main"."channel_entry" ("entry_id", "channel_name", "package_name", "operatorbundle_name", "replaces", "depth") VALUES ('28', 'stable', 'etcd', 'etcdoperator.v0.6.1', '27', '5'); INSERT INTO "main"."channel_entry" ("entry_id", "channel_name", "package_name", "operatorbundle_name", "replaces", "depth") VALUES ('29', 'stable', 'etcd', 'etcdoperator.v0.3.2-a', '', '6'); INSERT INTO "main"."channel_entry" ("entry_id", "channel_name", "package_name", "operatorbundle_name", "replaces", "depth") VALUES ('30', 'stable', 'etcd',
'etcdoperator.v0.6.1', '29', '6'); ` - _, err = tx.Exec(channel_entries) + _, err = tx.Exec(channelEntries) require.NoError(t, err) require.NoError(t, tx.Commit()) @@ -125,19 +125,19 @@ func TestReplacesSkipsUp(t *testing.T) { rows, err := db.QueryContext(context.TODO(), getBundle, tt.name) require.NoError(t, err) require.True(t, rows.Next()) - var replacesSql sql.NullString - var skipsSql sql.NullString - require.NoError(t, rows.Scan(&replacesSql, &skipsSql)) + var replacesSQL sql.NullString + var skipsSQL sql.NullString + require.NoError(t, rows.Scan(&replacesSQL, &skipsSQL)) require.False(t, rows.Next()) require.NoError(t, rows.Close()) - require.Equal(t, tt.replaces, replacesSql.String) - require.Equal(t, tt.skips, skipsSql.String) + require.Equal(t, tt.replaces, replacesSQL.String) + require.Equal(t, tt.skips, skipsSQL.String) }) } } func TestReplacesSkipsDown(t *testing.T) { - db, migrator, cleanup := CreateTestDbAt(t, migrations.ReplacesSkipsMigrationKey) + db, migrator, cleanup := CreateTestDBAt(t, migrations.ReplacesSkipsMigrationKey) defer cleanup() // Add a bundle @@ -159,7 +159,7 @@ func TestReplacesSkipsDown(t *testing.T) { require.NoError(t, err) rows.Next() require.NoError(t, rows.Scan(&name, &csv, &bundle)) - require.Equal(t, name.String, "etcdoperator.v0.6.1") + require.Equal(t, "etcdoperator.v0.6.1", name.String) require.Equal(t, csv.String, testCSV) require.Equal(t, bundle.String, testBundle) require.NoError(t, rows.Close()) diff --git a/pkg/sqlite/migrations/008_dependencies_test.go b/pkg/sqlite/migrations/008_dependencies_test.go index 84f2bfd11..82dfe3d16 100644 --- a/pkg/sqlite/migrations/008_dependencies_test.go +++ b/pkg/sqlite/migrations/008_dependencies_test.go @@ -11,7 +11,7 @@ import ( ) func TestDependenciesUp(t *testing.T) { - db, migrator, cleanup := CreateTestDbAt(t, migrations.DependenciesMigrationKey-1) + db, migrator, cleanup := CreateTestDBAt(t, migrations.DependenciesMigrationKey-1) defer cleanup() _, err := db.Exec(`PRAGMA foreign_keys = 0`) @@ -42,13 +42,13 @@ func TestDependenciesUp(t *testing.T) { var typeName sql.NullString var value sql.NullString require.NoError(t, rows.Scan(&typeName, &value)) - require.Equal(t, typeName.String, "olm.gvk") - require.Equal(t, value.String, `{"group":"test.coreos.com","kind":"testapi","type":"olm.gvk","version":"v1"}`) + require.Equal(t, "olm.gvk", typeName.String) + require.Equal(t, `{"group":"test.coreos.com","kind":"testapi","type":"olm.gvk","version":"v1"}`, value.String) require.NoError(t, rows.Close()) } func TestDependenciesDown(t *testing.T) { - db, migrator, cleanup := CreateTestDbAt(t, migrations.DependenciesMigrationKey) + db, migrator, cleanup := CreateTestDBAt(t, migrations.DependenciesMigrationKey) defer cleanup() _, err := db.Exec(`PRAGMA foreign_keys = 0`) @@ -77,7 +77,7 @@ func TestDependenciesDown(t *testing.T) { var typeName sql.NullString var value sql.NullString require.NoError(t, rows.Scan(&typeName, &value)) - require.Equal(t, typeName.String, "olm.package") + require.Equal(t, "olm.package", typeName.String) require.Equal(t, value.String, valueStr) require.NoError(t, rows.Close()) diff --git a/pkg/sqlite/migrations/009_properties_test.go b/pkg/sqlite/migrations/009_properties_test.go index 0cac21ab4..b47cad5a0 100644 --- a/pkg/sqlite/migrations/009_properties_test.go +++ b/pkg/sqlite/migrations/009_properties_test.go @@ -11,7 +11,7 @@ import ( ) func TestPropertiesUp(t *testing.T) { - db, migrator, cleanup := CreateTestDbAt(t, migrations.PropertiesMigrationKey-1) + db, migrator, cleanup := 
CreateTestDBAt(t, migrations.PropertiesMigrationKey-1) defer cleanup() _, err := db.Exec(`PRAGMA foreign_keys = 0`) @@ -28,8 +28,8 @@ func TestPropertiesUp(t *testing.T) { require.NoError(t, err) _, err = tx.Exec("insert into api_provider(group_name, version, kind, operatorbundle_name, operatorbundle_version, operatorbundle_path) values(?, ?, ?, ?, ?, ?)", "test.coreos.com", "v1", "testapi", "etcdoperator.v0.6.1", "0.6.1", "quay.io/image") require.NoError(t, err) - channel_entries := `INSERT INTO channel_entry("entry_id", "channel_name", "package_name", "operatorbundle_name", "replaces", "depth") VALUES ('1', 'alpha', 'etcd', 'etcdoperator.v0.6.1', '', '0');` - _, err = tx.Exec(channel_entries) + channelEntries := `INSERT INTO channel_entry("entry_id", "channel_name", "package_name", "operatorbundle_name", "replaces", "depth") VALUES ('1', 'alpha', 'etcd', 'etcdoperator.v0.6.1', '', '0');` + _, err = tx.Exec(channelEntries) require.NoError(t, err) valueStr := `{"packageName":"etcd","type":"olm.package","version":">0.6.0"}` _, err = tx.Exec("insert into dependencies(type, value, operatorbundle_name, operatorbundle_version, operatorbundle_path) VALUES (?, ?, ?, ?, ?)", "olm.package", valueStr, "etcdoperator.v0.6.1", "0.6.1", "quay.io/image") @@ -105,7 +105,7 @@ func TestPropertiesUp(t *testing.T) { } func TestPropertiesDown(t *testing.T) { - db, migrator, cleanup := CreateTestDbAt(t, migrations.PropertiesMigrationKey) + db, migrator, cleanup := CreateTestDBAt(t, migrations.PropertiesMigrationKey) defer cleanup() _, err := db.Exec(`PRAGMA foreign_keys = 0`) @@ -134,7 +134,7 @@ func TestPropertiesDown(t *testing.T) { var typeName sql.NullString var value sql.NullString require.NoError(t, rows.Scan(&typeName, &value)) - require.Equal(t, typeName.String, "olm.package") + require.Equal(t, "olm.package", typeName.String) require.Equal(t, value.String, valueStr) require.NoError(t, rows.Close()) diff --git a/pkg/sqlite/migrations/010_set_bundlepath_pkg_property_test.go b/pkg/sqlite/migrations/010_set_bundlepath_pkg_property_test.go index 860a63810..e1465d146 100644 --- a/pkg/sqlite/migrations/010_set_bundlepath_pkg_property_test.go +++ b/pkg/sqlite/migrations/010_set_bundlepath_pkg_property_test.go @@ -11,7 +11,7 @@ import ( ) func TestBundlePathPropertyUp(t *testing.T) { - db, migrator, cleanup := CreateTestDbAt(t, migrations.BundlePathPkgMigrationKey-1) + db, migrator, cleanup := CreateTestDBAt(t, migrations.BundlePathPkgMigrationKey-1) defer cleanup() _, err := db.Exec(`PRAGMA foreign_keys = 0`) @@ -48,7 +48,7 @@ func TestBundlePathPropertyUp(t *testing.T) { } func TestBundlePathPropertyDown(t *testing.T) { - db, migrator, cleanup := CreateTestDbAt(t, migrations.BundlePathPkgMigrationKey) + db, migrator, cleanup := CreateTestDBAt(t, migrations.BundlePathPkgMigrationKey) defer cleanup() _, err := db.Exec(`PRAGMA foreign_keys = 0`) diff --git a/pkg/sqlite/migrations/011_substitutes_for_test.go b/pkg/sqlite/migrations/011_substitutes_for_test.go index f92e0608b..555894a45 100644 --- a/pkg/sqlite/migrations/011_substitutes_for_test.go +++ b/pkg/sqlite/migrations/011_substitutes_for_test.go @@ -11,7 +11,7 @@ import ( ) func TestSubstitutesForUp(t *testing.T) { - db, migrator, cleanup := CreateTestDbAt(t, migrations.SubstitutesForMigrationKey-1) + db, migrator, cleanup := CreateTestDBAt(t, migrations.SubstitutesForMigrationKey-1) defer cleanup() _, err := db.Exec(`PRAGMA foreign_keys = 0`) @@ -44,7 +44,7 @@ func TestSubstitutesForUp(t *testing.T) { } func TestSubstitutesForDown(t *testing.T) { - db, 
migrator, cleanup := CreateTestDbAt(t, migrations.SubstitutesForMigrationKey) + db, migrator, cleanup := CreateTestDBAt(t, migrations.SubstitutesForMigrationKey) defer cleanup() _, err := db.Exec(`PRAGMA foreign_keys = 0`) diff --git a/pkg/sqlite/migrations/012_deprecated_test.go b/pkg/sqlite/migrations/012_deprecated_test.go index 06e3b4ca9..0d57de955 100644 --- a/pkg/sqlite/migrations/012_deprecated_test.go +++ b/pkg/sqlite/migrations/012_deprecated_test.go @@ -12,7 +12,7 @@ import ( ) func TestDeprecated(t *testing.T) { - db, migrator, cleanup := CreateTestDbAt(t, migrations.DeprecatedMigrationKey-1) + db, migrator, cleanup := CreateTestDBAt(t, migrations.DeprecatedMigrationKey-1) defer cleanup() // Insert fixture bundles to satisfy foreign key constraint in properties table diff --git a/pkg/sqlite/migrations/013_rm_truncated_deprecations_test.go b/pkg/sqlite/migrations/013_rm_truncated_deprecations_test.go index 8977d73ca..5ed881515 100644 --- a/pkg/sqlite/migrations/013_rm_truncated_deprecations_test.go +++ b/pkg/sqlite/migrations/013_rm_truncated_deprecations_test.go @@ -11,7 +11,7 @@ import ( ) func TestRmTruncatedDeprecations(t *testing.T) { - db, migrator, cleanup := CreateTestDbAt(t, migrations.RmTruncatedDeprecationsMigrationKey-1) + db, migrator, cleanup := CreateTestDBAt(t, migrations.RmTruncatedDeprecationsMigrationKey-1) defer cleanup() // Insert fixtures to satisfy foreign key constraints @@ -32,6 +32,7 @@ func TestRmTruncatedDeprecations(t *testing.T) { // Add a truncated bundle; i.e. doesn't exist in the channel_entry table _, err = db.Exec(insertDeprecated, "operator.v1.0.0-pre") + require.NoError(t, err) // This migration should delete all bundles that are not referenced by the channel_entry table require.NoError(t, migrator.Up(context.Background(), migrations.Only(migrations.RmTruncatedDeprecationsMigrationKey))) diff --git a/pkg/sqlite/migrator_test.go b/pkg/sqlite/migrator_test.go index 192238b5b..1fcd7e2e5 100644 --- a/pkg/sqlite/migrator_test.go +++ b/pkg/sqlite/migrator_test.go @@ -122,7 +122,7 @@ func TestSQLLiteMigrator_Down(t *testing.T) { t.Run(tt.name, func(t *testing.T) { up = false down = false - db, cleanup := CreateTestDb(t) + db, cleanup := CreateTestDB(t) defer cleanup() m := &SQLLiteMigrator{ db: db, @@ -280,7 +280,7 @@ func TestSQLLiteMigrator_Up(t *testing.T) { t.Run(tt.name, func(t *testing.T) { up = 0 down = false - db, cleanup := CreateTestDb(t) + db, cleanup := CreateTestDB(t) defer cleanup() m := &SQLLiteMigrator{ db: db, @@ -396,7 +396,7 @@ func TestSQLLiteMigrator_Migrate(t *testing.T) { t.Run(tt.name, func(t *testing.T) { up = 0 down = 0 - db, cleanup := CreateTestDb(t) + db, cleanup := CreateTestDB(t) defer cleanup() m := &SQLLiteMigrator{ db: db, diff --git a/pkg/sqlite/query_sql_test.go b/pkg/sqlite/query_sql_test.go index 83aafba3a..224b98297 100644 --- a/pkg/sqlite/query_sql_test.go +++ b/pkg/sqlite/query_sql_test.go @@ -212,7 +212,7 @@ func TestListBundlesQuery(t *testing.T) { t.Run(tt.Name, func(t *testing.T) { ctx := context.Background() - db, cleanup := CreateTestDb(t) + db, cleanup := CreateTestDB(t) defer cleanup() store, err := NewSQLLiteLoader(db) require.NoError(t, err) diff --git a/pkg/sqlite/remove_test.go b/pkg/sqlite/remove_test.go index 72f67862c..782b1bf5e 100644 --- a/pkg/sqlite/remove_test.go +++ b/pkg/sqlite/remove_test.go @@ -14,7 +14,7 @@ import ( func TestRemover(t *testing.T) { logrus.SetLevel(logrus.DebugLevel) - db, cleanup := CreateTestDb(t) + db, cleanup := CreateTestDB(t) defer cleanup() store, err := 
NewSQLLiteLoader(db) require.NoError(t, err) diff --git a/pkg/sqlite/stranded_test.go b/pkg/sqlite/stranded_test.go index 30a4979d1..c39081867 100644 --- a/pkg/sqlite/stranded_test.go +++ b/pkg/sqlite/stranded_test.go @@ -14,7 +14,7 @@ import ( func TestStrandedBundleRemover(t *testing.T) { logrus.SetLevel(logrus.DebugLevel) - db, cleanup := CreateTestDb(t) + db, cleanup := CreateTestDB(t) defer cleanup() store, err := NewSQLLiteLoader(db) require.NoError(t, err) @@ -43,7 +43,7 @@ func TestStrandedBundleRemover(t *testing.T) { querier := NewSQLLiteQuerierFromDb(db) packageBundles, err := querier.GetBundlesForPackage(context.TODO(), "prometheus") require.NoError(t, err) - require.Equal(t, 1, len(packageBundles)) + require.Len(t, packageBundles, 1) rows, err := db.QueryContext(context.TODO(), "select * from operatorbundle") require.NoError(t, err) @@ -63,7 +63,7 @@ func TestStrandedBundleRemover(t *testing.T) { // other bundles in the package still exist, but the bundle is removed packageBundles, err = querier.GetBundlesForPackage(context.TODO(), "prometheus") require.NoError(t, err) - require.Equal(t, 1, len(packageBundles)) + require.Len(t, packageBundles, 1) rows, err = db.QueryContext(context.TODO(), "select * from operatorbundle") require.NoError(t, err) diff --git a/test/e2e/bundle_image_test.go b/test/e2e/bundle_image_test.go index cf48dd35e..dffdd2607 100644 --- a/test/e2e/bundle_image_test.go +++ b/test/e2e/bundle_image_test.go @@ -276,7 +276,7 @@ func pushLoadImages(client *kubernetes.Clientset, w io.Writer, images ...string) } } else { for _, image := range images { - pushWith("docker", image) + err = pushWith("docker", image) if err != nil { return err }