From 8c2ee43e3d3be2cfea77c13fec2b977696e62b27 Mon Sep 17 00:00:00 2001 From: Joe Kelley Date: Tue, 16 Jul 2019 10:21:51 -0400 Subject: [PATCH 01/17] conversion commit --- Gopkg.lock | 20 +- Gopkg.toml | 6 +- api/external/kubernetes/solo-kit.json | 85 +- api/multicluster/v1/solo-kit.json | 43 +- pkg/api/v1/clients/kube/crd/crd.go | 61 +- pkg/api/v1/clients/kube/crd/registry.go | 33 + pkg/api/v1/clients/kube/crd/util.go | 17 + .../kubernetes.solo.io_suite_test.go | 8 +- pkg/code-generator/cmd/main.go | 223 +++-- pkg/code-generator/codegen/conversion.go | 165 ++++ .../codegen/{generator.go => project.go} | 59 +- pkg/code-generator/codegen/resource_group.go | 76 ++ .../codegen/templates/converter_template.go | 104 +++ .../templates/converter_test_template.go | 81 ++ .../codegen/templates/event_loop_template.go | 6 +- .../templates/event_loop_test_template.go | 2 +- pkg/code-generator/codegen/templates/funcs.go | 3 + .../templates/resource_client_template.go | 2 +- .../resource_client_test_template.go | 2 +- .../templates/resource_reconciler_template.go | 2 +- .../codegen/templates/resource_template.go | 9 +- .../templates/simple_event_loop_template.go | 6 +- .../templates/simple_test_suite_template.go | 21 + .../templates/snapshot_emitter_template.go | 2 +- .../snapshot_emitter_test_template.go | 4 +- .../snapshot_simple_emitter_template.go | 2 +- .../codegen/templates/snapshot_template.go | 2 +- .../codegen/templates/test_suite_template.go | 6 +- .../codegen/templates/xds_template.go | 2 +- .../docgen/funcs/template_funcs.go | 24 +- pkg/code-generator/docgen/generator.go | 8 +- .../templates/markdown/project_template.go | 10 +- .../templates/markdown/proto_file_template.go | 2 +- .../restructured/project_template.go | 12 +- .../restructured/proto_file_template.go | 2 +- pkg/code-generator/model/conversion.go | 15 + pkg/code-generator/model/project.go | 119 ++- pkg/code-generator/model/simple_test_suite.go | 5 + pkg/code-generator/parser/parser.go | 25 +- 
pkg/code-generator/parser/parser_resource.go | 57 +- pkg/code-generator/parser/parser_xds.go | 4 +- .../group/kubeconfigs_event_loop.sk.go | 28 +- .../group/kubeconfigs_event_loop_test.go | 24 +- .../group/kubeconfigs_simple_event_loop.sk.go | 42 +- .../group/kubeconfigs_snapshot.sk.go | 60 ++ .../group/kubeconfigs_snapshot_emitter.sk.go | 175 ++++ .../kubeconfigs_snapshot_emitter_test.go | 202 +++++ .../kubeconfigs_snapshot_simple_emitter.sk.go | 38 +- .../v1/kubeconfigs_event_loop.sk.go | 6 +- .../v1/kubeconfigs_event_loop_test.go | 2 +- .../v1/kubeconfigs_simple_event_loop.sk.go | 6 +- .../v1/kubeconfigs_snapshot.sk.go | 2 +- .../v1/kubeconfigs_snapshot_emitter.sk.go | 2 +- .../v1/kubeconfigs_snapshot_emitter_test.go | 4 +- .../kubeconfigs_snapshot_simple_emitter.sk.go | 2 +- .../v1/multicluster.solo.io_suite_test.go | 8 +- test/mocks/api/solo-kit.json | 63 ++ test/mocks/api/v1/mock_resources.proto | 1 + test/mocks/api/v1/solo-kit.json | 45 - test/mocks/api/v1alpha1/solo-kit.json | 15 - test/mocks/api/v2alpha1/solo-kit.json | 19 - .../mocks/conversion/conversion_suite_test.go | 15 + .../mocks/conversion/resource_converter.sk.go | 143 +++ .../conversion/resource_converter_test.go | 110 +++ .../envoy/api/v2/core/base.proto.sk.md | 367 ++++++++ .../envoy/api/v2/discovery.proto.sk.md | 179 ++++ .../external/envoy/type/percent.proto.sk.md | 81 ++ .../api/external/gogoproto/gogo.proto.sk.md | 44 + .../google/api/annotations.proto.sk.md | 30 + .../api/external/google/api/http.proto.sk.md | 331 +++++++ .../external/google/protobuf/any.proto.sk.md | 141 +++ .../google/protobuf/duration.proto.sk.md | 130 +++ .../google/protobuf/empty.proto.sk.md | 77 ++ .../google/protobuf/struct.proto.sk.md | 151 ++++ .../google/protobuf/timestamp.proto.sk.md | 148 ++++ .../google/protobuf/wrappers.proto.sk.md | 245 ++++++ .../external/google/rpc/status.proto.sk.md | 110 +++ .../solo-kit/api/external/metrics.proto.sk.md | 266 ++++++ .../solo-kit/api/external/trace.proto.sk.md | 467 
++++++++++ .../solo-kit/api/v1/metadata.proto.sk.md | 50 ++ .../solo-io/solo-kit/api/v1/ref.proto.sk.md | 42 + .../solo-kit/api/v1/solo-kit.proto.sk.md | 44 + .../solo-kit/api/v1/status.proto.sk.md | 62 ++ .../api/v1/apiserver/api_server.proto.sk.md | 247 ++++++ .../mocks/api/v1/mock_resources.proto.sk.md | 95 ++ .../api/v1/more_mock_resources.proto.sk.md | 65 ++ .../api/v1alpha1/mock_resources.proto.sk.md | 74 ++ .../api/v2alpha1/mock_resources.proto.sk.md | 49 ++ .../google/protobuf/descriptor.proto.sk.md | 825 ++++++++++++++++++ test/mocks/docs/testing.solo.io.project.sk.md | 21 + .../{v1 => group}/testing_event_loop.sk.go | 6 +- .../{v1 => group}/testing_event_loop_test.go | 8 +- .../testing_simple_event_loop.sk.go | 6 +- .../{v1 => group}/testing_snapshot.sk.go | 6 +- .../testing_snapshot_emitter.sk.go | 20 +- .../testing_snapshot_emitter_test.go | 52 +- .../testing_snapshot_simple_emitter.sk.go | 8 +- test/mocks/v1/fake_resource.sk.go | 12 + test/mocks/v1/fake_resource_client_test.go | 1 + test/mocks/v1/mock_resources.pb.go | 90 +- test/mocks/v1/more_mock_resources.pb.go | 3 +- test/mocks/v1/testing.solo.io_suite_test.go | 8 +- test/mocks/v1alpha1/fake_resource.sk.go | 2 +- test/mocks/v1alpha1/mock_resource.sk.go | 2 +- test/mocks/v1alpha1/mock_resources.pb.go | 3 +- test/mocks/v1alpha1/more_mock_resources.pb.go | 238 ----- .../v1alpha1/testing.solo.io_suite_test.go | 8 +- .../v1alpha1/testing_simple_event_loop.sk.go | 122 --- test/mocks/v1alpha1/testing_snapshot.sk.go | 60 -- .../v1alpha1/testing_snapshot_emitter.sk.go | 175 ---- .../v1alpha1/testing_snapshot_emitter_test.go | 208 ----- test/mocks/v2alpha1/mock_resources.pb.go | 3 +- .../v2alpha1/testing.solo.io_suite_test.go | 8 +- test/mocks/v2alpha1/testing_event_loop.sk.go | 93 -- .../mocks/v2alpha1/testing_event_loop_test.go | 73 -- test/mocks/v2alpha1/testing_snapshot.sk.go | 77 -- .../v2alpha1/testing_snapshot_emitter.sk.go | 223 ----- .../v2alpha1/testing_snapshot_emitter_test.go | 333 ------- 
.../testing_snapshot_simple_emitter.sk.go | 104 --- 119 files changed, 6309 insertions(+), 2331 deletions(-) create mode 100644 pkg/api/v1/clients/kube/crd/util.go create mode 100644 pkg/code-generator/codegen/conversion.go rename pkg/code-generator/codegen/{generator.go => project.go} (64%) create mode 100644 pkg/code-generator/codegen/resource_group.go create mode 100644 pkg/code-generator/codegen/templates/converter_template.go create mode 100644 pkg/code-generator/codegen/templates/converter_test_template.go create mode 100644 pkg/code-generator/codegen/templates/simple_test_suite_template.go create mode 100644 pkg/code-generator/model/conversion.go create mode 100644 pkg/code-generator/model/simple_test_suite.go rename test/mocks/v1alpha1/testing_event_loop.sk.go => pkg/multicluster/group/kubeconfigs_event_loop.sk.go (66%) rename test/mocks/v1alpha1/testing_event_loop_test.go => pkg/multicluster/group/kubeconfigs_event_loop_test.go (58%) rename test/mocks/v2alpha1/testing_simple_event_loop.sk.go => pkg/multicluster/group/kubeconfigs_simple_event_loop.sk.go (64%) create mode 100644 pkg/multicluster/group/kubeconfigs_snapshot.sk.go create mode 100644 pkg/multicluster/group/kubeconfigs_snapshot_emitter.sk.go create mode 100644 pkg/multicluster/group/kubeconfigs_snapshot_emitter_test.go rename test/mocks/v1alpha1/testing_snapshot_simple_emitter.sk.go => pkg/multicluster/group/kubeconfigs_snapshot_simple_emitter.sk.go (54%) create mode 100644 test/mocks/api/solo-kit.json delete mode 100644 test/mocks/api/v1/solo-kit.json delete mode 100644 test/mocks/api/v1alpha1/solo-kit.json delete mode 100644 test/mocks/api/v2alpha1/solo-kit.json create mode 100644 test/mocks/conversion/conversion_suite_test.go create mode 100644 test/mocks/conversion/resource_converter.sk.go create mode 100644 test/mocks/conversion/resource_converter_test.go create mode 100644 test/mocks/docs/github.com/solo-io/solo-kit/api/external/envoy/api/v2/core/base.proto.sk.md create mode 100644 
test/mocks/docs/github.com/solo-io/solo-kit/api/external/envoy/api/v2/discovery.proto.sk.md create mode 100644 test/mocks/docs/github.com/solo-io/solo-kit/api/external/envoy/type/percent.proto.sk.md create mode 100644 test/mocks/docs/github.com/solo-io/solo-kit/api/external/gogoproto/gogo.proto.sk.md create mode 100644 test/mocks/docs/github.com/solo-io/solo-kit/api/external/google/api/annotations.proto.sk.md create mode 100644 test/mocks/docs/github.com/solo-io/solo-kit/api/external/google/api/http.proto.sk.md create mode 100644 test/mocks/docs/github.com/solo-io/solo-kit/api/external/google/protobuf/any.proto.sk.md create mode 100644 test/mocks/docs/github.com/solo-io/solo-kit/api/external/google/protobuf/duration.proto.sk.md create mode 100644 test/mocks/docs/github.com/solo-io/solo-kit/api/external/google/protobuf/empty.proto.sk.md create mode 100644 test/mocks/docs/github.com/solo-io/solo-kit/api/external/google/protobuf/struct.proto.sk.md create mode 100644 test/mocks/docs/github.com/solo-io/solo-kit/api/external/google/protobuf/timestamp.proto.sk.md create mode 100644 test/mocks/docs/github.com/solo-io/solo-kit/api/external/google/protobuf/wrappers.proto.sk.md create mode 100644 test/mocks/docs/github.com/solo-io/solo-kit/api/external/google/rpc/status.proto.sk.md create mode 100644 test/mocks/docs/github.com/solo-io/solo-kit/api/external/metrics.proto.sk.md create mode 100644 test/mocks/docs/github.com/solo-io/solo-kit/api/external/trace.proto.sk.md create mode 100644 test/mocks/docs/github.com/solo-io/solo-kit/api/v1/metadata.proto.sk.md create mode 100644 test/mocks/docs/github.com/solo-io/solo-kit/api/v1/ref.proto.sk.md create mode 100644 test/mocks/docs/github.com/solo-io/solo-kit/api/v1/solo-kit.proto.sk.md create mode 100644 test/mocks/docs/github.com/solo-io/solo-kit/api/v1/status.proto.sk.md create mode 100644 test/mocks/docs/github.com/solo-io/solo-kit/pkg/api/v1/apiserver/api_server.proto.sk.md create mode 100644 
test/mocks/docs/github.com/solo-io/solo-kit/test/mocks/api/v1/mock_resources.proto.sk.md create mode 100644 test/mocks/docs/github.com/solo-io/solo-kit/test/mocks/api/v1/more_mock_resources.proto.sk.md create mode 100644 test/mocks/docs/github.com/solo-io/solo-kit/test/mocks/api/v1alpha1/mock_resources.proto.sk.md create mode 100644 test/mocks/docs/github.com/solo-io/solo-kit/test/mocks/api/v2alpha1/mock_resources.proto.sk.md create mode 100644 test/mocks/docs/google/protobuf/descriptor.proto.sk.md create mode 100644 test/mocks/docs/testing.solo.io.project.sk.md rename test/mocks/{v1 => group}/testing_event_loop.sk.go (93%) rename test/mocks/{v1 => group}/testing_event_loop_test.go (88%) rename test/mocks/{v1 => group}/testing_simple_event_loop.sk.go (95%) rename test/mocks/{v1 => group}/testing_snapshot.sk.go (93%) rename test/mocks/{v1 => group}/testing_snapshot_emitter.sk.go (93%) rename test/mocks/{v1 => group}/testing_snapshot_emitter_test.go (91%) rename test/mocks/{v1 => group}/testing_snapshot_simple_emitter.sk.go (92%) delete mode 100644 test/mocks/v1alpha1/more_mock_resources.pb.go delete mode 100644 test/mocks/v1alpha1/testing_simple_event_loop.sk.go delete mode 100644 test/mocks/v1alpha1/testing_snapshot.sk.go delete mode 100644 test/mocks/v1alpha1/testing_snapshot_emitter.sk.go delete mode 100644 test/mocks/v1alpha1/testing_snapshot_emitter_test.go delete mode 100644 test/mocks/v2alpha1/testing_event_loop.sk.go delete mode 100644 test/mocks/v2alpha1/testing_event_loop_test.go delete mode 100644 test/mocks/v2alpha1/testing_snapshot.sk.go delete mode 100644 test/mocks/v2alpha1/testing_snapshot_emitter.sk.go delete mode 100644 test/mocks/v2alpha1/testing_snapshot_emitter_test.go delete mode 100644 test/mocks/v2alpha1/testing_snapshot_simple_emitter.sk.go diff --git a/Gopkg.lock b/Gopkg.lock index febf577a4..fa6d17ad6 100644 --- a/Gopkg.lock +++ b/Gopkg.lock @@ -562,7 +562,7 @@ version = "v0.1" [[projects]] - digest = 
"1:46b816b3c115aa07ca2537c0870e9e1f35c3b6920563b665dcbabaa510d1eb6c" + digest = "1:d41d48ce8186f6a3e0a4df90def7a583d85e56a21d58c4b9009afaa4daaad6de" name = "github.com/solo-io/go-utils" packages = [ "clicore/constants", @@ -577,10 +577,11 @@ "testutils", "testutils/clusterlock", "testutils/kube", + "versionutils/kubeapi", ] pruneopts = "UT" - revision = "1c2dd400703ec1a7340609a6f5c00653ecf3d5c7" - version = "v0.9.1" + revision = "95cd6da41a4f69714768061dd06b75b4ec5636cb" + version = "v0.9.4" [[projects]] digest = "1:9424f440bba8f7508b69414634aef3b2b3a877e522d8a4624692412805407bb7" @@ -839,6 +840,15 @@ revision = "168a6198bcb0ef175f7dacec0b8691fc141dc9b8" version = "v1.13.0" +[[projects]] + digest = "1:abeb38ade3f32a92943e5be54f55ed6d6e3b6602761d74b4aab4c9dd45c18abd" + name = "gopkg.in/fsnotify.v1" + packages = ["."] + pruneopts = "UT" + revision = "c2828203cd70a50dcccfb2761f8b1f8ceef9a8e9" + source = "https://github.com/fsnotify/fsnotify.git" + version = "v1.4.7" + [[projects]] digest = "1:abeb38ade3f32a92943e5be54f55ed6d6e3b6602761d74b4aab4c9dd45c18abd" name = "gopkg.in/fsnotify/fsnotify.v1" @@ -1268,6 +1278,7 @@ "github.com/solo-io/go-utils/testutils", "github.com/solo-io/go-utils/testutils/clusterlock", "github.com/solo-io/go-utils/testutils/kube", + "github.com/solo-io/go-utils/versionutils/kubeapi", "go.opencensus.io/stats", "go.opencensus.io/stats/view", "go.opencensus.io/tag", @@ -1280,6 +1291,7 @@ "google.golang.org/grpc/codes", "google.golang.org/grpc/metadata", "google.golang.org/grpc/status", + "gopkg.in/fsnotify.v1", "k8s.io/api/apps/v1", "k8s.io/api/core/v1", "k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/v1beta1", @@ -1287,7 +1299,9 @@ "k8s.io/apiextensions-apiserver/pkg/client/informers/externalversions", "k8s.io/apiextensions-apiserver/pkg/client/listers/apiextensions/v1beta1", "k8s.io/apimachinery/pkg/api/errors", + "k8s.io/apimachinery/pkg/api/meta", "k8s.io/apimachinery/pkg/apis/meta/v1", + "k8s.io/apimachinery/pkg/conversion", 
"k8s.io/apimachinery/pkg/labels", "k8s.io/apimachinery/pkg/runtime", "k8s.io/apimachinery/pkg/runtime/schema", diff --git a/Gopkg.toml b/Gopkg.toml index 2b4089ecf..5d6343496 100644 --- a/Gopkg.toml +++ b/Gopkg.toml @@ -17,6 +17,10 @@ required = ["k8s.io/code-generator/cmd/client-gen"] name = "k8s.io/gengo" unused-packages = false +[[override]] + name = "gopkg.in/fsnotify.v1" + source = "https://github.com/fsnotify/fsnotify.git" + [[constraint]] version = "1.1.1" name = "github.com/gogo/protobuf" @@ -31,7 +35,7 @@ required = ["k8s.io/code-generator/cmd/client-gen"] [[constraint]] name = "github.com/solo-io/go-utils" - version = "0.9.1" + version = "0.9.4" [[constraint]] version = ">=0.8.0" diff --git a/api/external/kubernetes/solo-kit.json b/api/external/kubernetes/solo-kit.json index 91719e43f..1b77e5656 100644 --- a/api/external/kubernetes/solo-kit.json +++ b/api/external/kubernetes/solo-kit.json @@ -1,44 +1,53 @@ { "title": "solo-kit", - "name": "kubernetes.solo.io", - "version": "kubernetes", - "go_package": "github.com/solo-io/solo-kit/pkg/api/v1/resources/common/kubernetes", - "custom_resources": [ + "api_groups": [ { - "package": "github.com/solo-io/solo-kit/api/external/kubernetes/pod", - "type": "Pod", - "plural_name": "pods", - "short_name": "p" - }, - { - "package": "github.com/solo-io/solo-kit/api/external/kubernetes/namespace", - "type": "KubeNamespace", - "plural_name": "kubenamespaces", - "short_name": "kn" - }, - { - "package": "github.com/solo-io/solo-kit/api/external/kubernetes/configmap", - "type": "ConfigMap", - "plural_name": "configmaps", - "short_name": "cm" - }, - { - "package": "github.com/solo-io/solo-kit/api/external/kubernetes/service", - "type": "Service", - "plural_name": "services", - "short_name": "svc" - }, - { - "package": "github.com/solo-io/solo-kit/api/external/kubernetes/deployment", - "type": "Deployment", - "plural_name": "deployments", - "short_name": "dp " - }, - { - "package": 
"github.com/solo-io/solo-kit/api/external/kubernetes/customresourcedefinition", - "type": "CustomResourceDefinition", - "plural_name": "customresourcedefinition", - "short_name": "crd " + "name": "kubernetes.solo.io", + "resource_group_go_package": "github.com/solo-io/solo-kit/api/external/kubernetes/group", + "version_configs": [ + { + "version": "kubernetes", + "go_package": "github.com/solo-io/solo-kit/pkg/api/v1/resources/common/kubernetes", + "custom_resources": [ + { + "package": "github.com/solo-io/solo-kit/api/external/kubernetes/pod", + "type": "Pod", + "plural_name": "pods", + "short_name": "p" + }, + { + "package": "github.com/solo-io/solo-kit/api/external/kubernetes/namespace", + "type": "KubeNamespace", + "plural_name": "kubenamespaces", + "short_name": "kn" + }, + { + "package": "github.com/solo-io/solo-kit/api/external/kubernetes/configmap", + "type": "ConfigMap", + "plural_name": "configmaps", + "short_name": "cm" + }, + { + "package": "github.com/solo-io/solo-kit/api/external/kubernetes/service", + "type": "Service", + "plural_name": "services", + "short_name": "svc" + }, + { + "package": "github.com/solo-io/solo-kit/api/external/kubernetes/deployment", + "type": "Deployment", + "plural_name": "deployments", + "short_name": "dp " + }, + { + "package": "github.com/solo-io/solo-kit/api/external/kubernetes/customresourcedefinition", + "type": "CustomResourceDefinition", + "plural_name": "customresourcedefinition", + "short_name": "crd " + } + ] + } + ] } ] } \ No newline at end of file diff --git a/api/multicluster/v1/solo-kit.json b/api/multicluster/v1/solo-kit.json index 949f25074..11b9f11d3 100644 --- a/api/multicluster/v1/solo-kit.json +++ b/api/multicluster/v1/solo-kit.json @@ -1,22 +1,31 @@ { "title": "Solo-Kit Multicluster Manager", - "name": "multicluster.solo.io", - "version": "v1", - "go_package": "github.com/solo-io/solo-kit/pkg/multicluster/v1", - "custom_resources": [ + "api_groups": [ { - "package": 
"github.com/solo-io/solo-kit/api/multicluster/v1", - "type": "KubeConfig", - "plural_name": "kubeconfigs", - "short_name": "kc" + "name": "multicluster.solo.io", + "resource_group_go_package": "github.com/solo-io/solo-kit/pkg/multicluster/group", + "resource_groups": { + "kubeconfigs.multicluster.solo.io": [ + { + "name": "KubeConfig", + "package": "github.com/solo-io/solo-kit/api/multicluster/v1" + } + ] + }, + "version_configs": [ + { + "version": "v1", + "go_package": "github.com/solo-io/solo-kit/pkg/multicluster/v1", + "custom_resources": [ + { + "package": "github.com/solo-io/solo-kit/api/multicluster/v1", + "type": "KubeConfig", + "plural_name": "kubeconfigs", + "short_name": "kc" + } + ] + } + ] } - ], - "resource_groups": { - "kubeconfigs.multicluster.solo.io": [ - { - "name": "KubeConfig", - "package": "github.com/solo-io/solo-kit/api/multicluster/v1" - } - ] - } + ] } diff --git a/pkg/api/v1/clients/kube/crd/crd.go b/pkg/api/v1/clients/kube/crd/crd.go index 83911a5c3..948e007a7 100644 --- a/pkg/api/v1/clients/kube/crd/crd.go +++ b/pkg/api/v1/clients/kube/crd/crd.go @@ -1,14 +1,13 @@ package crd import ( - "fmt" "log" "sync" + "github.com/solo-io/go-utils/errors" "github.com/solo-io/solo-kit/pkg/api/v1/clients/kube/crd/client/clientset/versioned/scheme" v1 "github.com/solo-io/solo-kit/pkg/api/v1/clients/kube/crd/solo.io/v1" "github.com/solo-io/solo-kit/pkg/api/v1/resources" - "github.com/solo-io/solo-kit/pkg/utils/protoutils" apiexts "k8s.io/apiextensions-apiserver/pkg/client/clientset/clientset" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/apimachinery/pkg/runtime" @@ -16,7 +15,22 @@ import ( ) // TODO(ilackarms): evaluate this fix for concurrent map access in k8s.io/apimachinery/pkg/runtime.SchemaBuider -var registerLock sync.Mutex +var ( + registerLock sync.Mutex + + VersionNotFoundError = func(version string) error { + return errors.Errorf("could not find version %v", version) + } +) + +type Converter interface { + Convert(src SoloKitCrd, dst 
SoloKitCrd) error +} + +type SoloKitCrd interface { + runtime.Object + resources.Resource +} type CrdMeta struct { Plural string @@ -28,7 +42,7 @@ type CrdMeta struct { type Version struct { Version string - Type runtime.Object + Type SoloKitCrd } type Crd struct { @@ -41,6 +55,15 @@ type MultiVersionCrd struct { Versions []Version } +func (m *MultiVersionCrd) GetVersion(requested string) (*Version, error) { + for _, version := range m.Versions { + if version.Version == requested { + return &version, nil + } + } + return nil, VersionNotFoundError(requested) +} + func NewCrd( plural string, group string, @@ -48,7 +71,7 @@ func NewCrd( kindName string, shortName string, clusterScoped bool, - objType runtime.Object) Crd { + objType SoloKitCrd) Crd { c := Crd{ CrdMeta: CrdMeta{ Plural: plural, @@ -65,6 +88,12 @@ func NewCrd( if err := c.AddToScheme(scheme.Scheme); err != nil { log.Panicf("error while adding [%v] CRD to scheme: %v", c.FullName(), err) } + // if res, ok := objType.(resources.Resource); ok { + // c.Version.ProtoSpec = res + // } else { + // log.Panicf("error while creating crd for %v, must extend " + + // "resources.Resource interface", c.FullName()) + // } return c } @@ -73,25 +102,9 @@ func (d Crd) Register(apiexts apiexts.Interface) error { } func (d Crd) KubeResource(resource resources.InputResource) *v1.Resource { - data, err := protoutils.MarshalMap(resource) - if err != nil { - panic(fmt.Sprintf("internal error: failed to marshal resource to map: %v", err)) - } - delete(data, "metadata") - delete(data, "status") - spec := v1.Spec(data) - return &v1.Resource{ - TypeMeta: d.TypeMeta(), - ObjectMeta: metav1.ObjectMeta{ - Namespace: resource.GetMetadata().Namespace, - Name: resource.GetMetadata().Name, - ResourceVersion: resource.GetMetadata().ResourceVersion, - Labels: resource.GetMetadata().Labels, - Annotations: resource.GetMetadata().Annotations, - }, - Status: resource.GetStatus(), - Spec: &spec, - } + res := KubeResource(resource) + res.TypeMeta 
= d.TypeMeta() + return res } func (d CrdMeta) FullName() string { diff --git a/pkg/api/v1/clients/kube/crd/registry.go b/pkg/api/v1/clients/kube/crd/registry.go index 6f5fb4838..cea3f8e5a 100644 --- a/pkg/api/v1/clients/kube/crd/registry.go +++ b/pkg/api/v1/clients/kube/crd/registry.go @@ -6,6 +6,9 @@ import ( "github.com/solo-io/go-utils/errors" "github.com/solo-io/go-utils/kubeutils" + v1 "github.com/solo-io/solo-kit/pkg/api/v1/clients/kube/crd/solo.io/v1" + "github.com/solo-io/solo-kit/pkg/api/v1/resources" + "github.com/solo-io/solo-kit/pkg/utils/protoutils" "k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/v1beta1" apiexts "k8s.io/apiextensions-apiserver/pkg/client/clientset/clientset" apierrors "k8s.io/apimachinery/pkg/api/errors" @@ -46,6 +49,10 @@ func AddCrd(resource Crd) error { return getRegistry().addCrd(resource) } +func GetMultiVersionCrd(gk schema.GroupKind) (MultiVersionCrd, error) { + return getRegistry().getMultiVersionCrd(gk) +} + func (r *crdRegistry) addCrd(resource Crd) error { r.mu.Lock() defer r.mu.Unlock() @@ -149,3 +156,29 @@ func (r crdRegistry) getKubeCrd(crd MultiVersionCrd, gvk schema.GroupVersionKind }, }, nil } + +func KubeResource(resource resources.Resource) *v1.Resource { + data, err := protoutils.MarshalMap(resource) + if err != nil { + panic(fmt.Sprintf("internal error: failed to marshal resource to map: %v", err)) + } + delete(data, "metadata") + delete(data, "status") + spec := v1.Spec(data) + res := &v1.Resource{ + ObjectMeta: metav1.ObjectMeta{ + Namespace: resource.GetMetadata().Namespace, + Name: resource.GetMetadata().Name, + ResourceVersion: resource.GetMetadata().ResourceVersion, + Labels: resource.GetMetadata().Labels, + Annotations: resource.GetMetadata().Annotations, + }, + Spec: &spec, + } + + if withStatus, ok := resource.(resources.InputResource); ok { + res.Status = withStatus.GetStatus() + } + + return res +} diff --git a/pkg/api/v1/clients/kube/crd/util.go b/pkg/api/v1/clients/kube/crd/util.go new file 
mode 100644 index 000000000..b4f2b8580 --- /dev/null +++ b/pkg/api/v1/clients/kube/crd/util.go @@ -0,0 +1,17 @@ +package crd + +import ( + "github.com/solo-io/solo-kit/pkg/utils/protoutils" +) + +func Copy(src, dst SoloKitCrd) error { + srcBytes, err := protoutils.MarshalBytes(src) + if err != nil { + return err + } + err = protoutils.UnmarshalBytes(srcBytes, dst) + if err != nil { + return err + } + return nil +} diff --git a/pkg/api/v1/resources/common/kubernetes/kubernetes.solo.io_suite_test.go b/pkg/api/v1/resources/common/kubernetes/kubernetes.solo.io_suite_test.go index 034e96974..3cc5d16d6 100644 --- a/pkg/api/v1/resources/common/kubernetes/kubernetes.solo.io_suite_test.go +++ b/pkg/api/v1/resources/common/kubernetes/kubernetes.solo.io_suite_test.go @@ -36,13 +36,13 @@ var ( Expect(err).NotTo(HaveOccurred()) clientset, err := apiexts.NewForConfig(cfg) Expect(err).NotTo(HaveOccurred()) - err = clientset.ApiextensionsV1beta1().CustomResourceDefinitions().Delete("anothermockresources.testing.solo.io", &metav1.DeleteOptions{}) - testutils.ErrorNotOccuredOrNotFound(err) - err = clientset.ApiextensionsV1beta1().CustomResourceDefinitions().Delete("clusterresources.testing.solo.io", &metav1.DeleteOptions{}) + err = clientset.ApiextensionsV1beta1().CustomResourceDefinitions().Delete("mocks.testing.solo.io", &metav1.DeleteOptions{}) testutils.ErrorNotOccuredOrNotFound(err) err = clientset.ApiextensionsV1beta1().CustomResourceDefinitions().Delete("fakes.testing.solo.io", &metav1.DeleteOptions{}) testutils.ErrorNotOccuredOrNotFound(err) - err = clientset.ApiextensionsV1beta1().CustomResourceDefinitions().Delete("mocks.testing.solo.io", &metav1.DeleteOptions{}) + err = clientset.ApiextensionsV1beta1().CustomResourceDefinitions().Delete("anothermockresources.testing.solo.io", &metav1.DeleteOptions{}) + testutils.ErrorNotOccuredOrNotFound(err) + err = clientset.ApiextensionsV1beta1().CustomResourceDefinitions().Delete("clusterresources.testing.solo.io", 
&metav1.DeleteOptions{}) testutils.ErrorNotOccuredOrNotFound(err) Expect(lock.ReleaseLock()).NotTo(HaveOccurred()) }) diff --git a/pkg/code-generator/cmd/main.go b/pkg/code-generator/cmd/main.go index 3ef86c8a7..8f5f8476b 100644 --- a/pkg/code-generator/cmd/main.go +++ b/pkg/code-generator/cmd/main.go @@ -76,17 +76,17 @@ func Generate(opts GenerateOptions) error { return err } - // Creates a ProjectConfig from each of the 'solo-kit.json' files + // Creates a VersionConfig from each of the 'solo-kit.json' files // found in the directory tree rooted at 'absoluteRoot'. - projectConfigs, err := collectProjectsFromRoot(absoluteRoot, skipDirs) + soloKitProjects, err := collectProjectsFromRoot(absoluteRoot, skipDirs) if err != nil { return err } log.Printf("collected projects: %v", func() []string { var names []string - for _, project := range projectConfigs { - names = append(names, project.Name) + for _, skp := range soloKitProjects { + names = append(names, skp.Title) } sort.Strings(names) return names @@ -97,9 +97,13 @@ func Generate(opts GenerateOptions) error { if !compileProtos { return false } - for _, proj := range projectConfigs { - if strings.HasPrefix(protoFile, filepath.Dir(proj.ProjectFile)) { - return true + for _, skp := range soloKitProjects { + for _, ag := range skp.ApiGroups { + for _, vc := range ag.VersionConfigs { + if strings.HasPrefix(protoFile, filepath.Dir(skp.ProjectFile)+"/"+vc.Version) { + return true + } + } } } return false @@ -122,80 +126,125 @@ func Generate(opts GenerateOptions) error { }()) var protoDescriptors []*descriptor.FileDescriptorProto - for _, projectConfig := range projectConfigs { - importedResources, err := importCustomResources(projectConfig.Imports) - if err != nil { - return err - } - - projectConfig.CustomResources = append(projectConfig.CustomResources, importedResources...) 
- - for _, desc := range descriptors { - if filepath.Dir(desc.ProtoFilePath) == filepath.Dir(projectConfig.ProjectFile) { - projectConfig.ProjectProtos = append(projectConfig.ProjectProtos, desc.GetName()) + for _, skp := range soloKitProjects { + for _, ag := range skp.ApiGroups { + importedResources, err := importCustomResources(ag.Imports) + if err != nil { + return err + } + for _, vc := range ag.VersionConfigs { + vc.CustomResources = append(vc.CustomResources, importedResources...) + for _, vc := range ag.VersionConfigs { + for _, desc := range descriptors { + if filepath.Dir(desc.ProtoFilePath) == filepath.Dir(skp.ProjectFile)+"/"+vc.Version { + vc.VersionProtos = append(vc.VersionProtos, desc.GetName()) + } + protoDescriptors = append(protoDescriptors, desc.FileDescriptorProto) + } + } } - protoDescriptors = append(protoDescriptors, desc.FileDescriptorProto) } } - for _, projectConfig := range projectConfigs { + for _, skp := range soloKitProjects { + for _, ag := range skp.ApiGroups { + ag.SoloKitProject = skp + // Store all projects for conversion generation. + var apiGroupVersions []*model.Version + for _, vc := range ag.VersionConfigs { + vc.ApiGroup = ag - // Build a 'Project' object that contains a resource for each message that: - // - is contained in the FileDescriptor and - // - is a solo kit resource (i.e. it has a field named 'metadata') + // Build a 'Version' object that contains a resource for each message that: + // - is contained in the FileDescriptor and + // - is a solo kit resource (i.e. 
it has a field named 'metadata') - project, err := parser.ProcessDescriptors(projectConfig, projectConfigs, protoDescriptors) - if err != nil { - return err - } + version, err := parser.ProcessDescriptors(vc, ag, protoDescriptors) + if err != nil { + return err + } + apiGroupVersions = append(apiGroupVersions, version) - code, err := codegen.GenerateFiles(project, true, opts.SkipGeneratedTests) - if err != nil { - return err - } + code, err := codegen.GenerateProjectFiles(version, true, opts.SkipGeneratedTests) + if err != nil { + return err + } - if project.ProjectConfig.DocsDir != "" && (genDocs != nil) { - docs, err := docgen.GenerateFiles(project, genDocs) - if err != nil { - return err + outDir := filepath.Join(gopathSrc(), version.VersionConfig.GoPackage) + if err := writeCodeFiles(code, outDir); err != nil { + return err + } + + genDocs = &DocsOptions{} + if ag.DocsDir != "" && (genDocs != nil) { + docs, err := docgen.GenerateFiles(version, genDocs) + if err != nil { + return err + } + + for _, file := range docs { + path := filepath.Join(absoluteRoot, ag.DocsDir, file.Filename) + if err := os.MkdirAll(filepath.Dir(path), 0777); err != nil { + return err + } + if err := ioutil.WriteFile(path, []byte(file.Content), 0644); err != nil { + return err + } + } + } + + // Generate mocks + // need to run after to make sure all resources have already been written + // Set this env var during tests so that mocks are not generated + if !opts.SkipGenMocks { + if err := genMocks(code, outDir, absoluteRoot); err != nil { + return err + } + } } - for _, file := range docs { - path := filepath.Join(absoluteRoot, project.ProjectConfig.DocsDir, file.Filename) - if err := os.MkdirAll(filepath.Dir(path), 0777); err != nil { + if ag.ResourceGroupGoPackage != "" { + var allResources []*model.Resource + for _, v := range apiGroupVersions { + allResources = append(allResources, v.Resources...) 
+ } + ag.ResourceGroupsFoo, err = parser.GetResourceGroups(ag, allResources) + if err != nil { return err } - if err := ioutil.WriteFile(path, []byte(file.Content), 0644); err != nil { + + code, err := codegen.GenerateResourceGroupFiles(ag, true, opts.SkipGeneratedTests) + if err != nil { return err } - } - } - outDir := filepath.Join(gopathSrc(), project.ProjectConfig.GoPackage) + outDir := filepath.Join(gopathSrc(), ag.ResourceGroupGoPackage) + if err := writeCodeFiles(code, outDir); err != nil { + return err + } - for _, file := range code { - path := filepath.Join(outDir, file.Filename) - if err := os.MkdirAll(filepath.Dir(path), 0777); err != nil { - return err - } - if err := ioutil.WriteFile(path, []byte(file.Content), 0644); err != nil { - return err - } - if out, err := exec.Command("gofmt", "-w", path).CombinedOutput(); err != nil { - return errors.Wrapf(err, "gofmt failed: %s", out) + // Generate mocks + // need to run after to make sure all resources have already been written + // Set this env var during tests so that mocks are not generated + if !opts.SkipGenMocks { + if err := genMocks(code, outDir, absoluteRoot); err != nil { + return err + } + } } - if out, err := exec.Command("goimports", "-w", path).CombinedOutput(); err != nil { - return errors.Wrapf(err, "goimports failed: %s", out) - } - } + if ag.ConversionGoPackage != "" { + goPackageSegments := strings.Split(ag.ConversionGoPackage, "/") + ag.ConversionGoPackageShort = goPackageSegments[len(goPackageSegments)-1] - // Generate mocks - // need to run after to make sure all resources have already been written - // Set this env var during tests so that mocks are not generated - if !opts.SkipGenMocks { - if err := genMocks(code, outDir, absoluteRoot); err != nil { - return err + code, err := codegen.GenerateConversionFiles(ag, apiGroupVersions) + if err != nil { + return err + } + + outDir := filepath.Join(gopathSrc(), ag.ConversionGoPackage) + if err := writeCodeFiles(code, outDir); err != nil { 
+ return err + } } } } @@ -254,8 +303,8 @@ func gopathSrc() string { return filepath.Join(os.Getenv("GOPATH"), "src") } -func collectProjectsFromRoot(root string, skipDirs []string) ([]*model.ProjectConfig, error) { - var projects []*model.ProjectConfig +func collectProjectsFromRoot(root string, skipDirs []string) ([]*model.SoloKitProject, error) { + var soloKitProjects []*model.SoloKitProject if err := filepath.Walk(root, func(projectFile string, info os.FileInfo, err error) error { if err != nil { @@ -279,12 +328,13 @@ func collectProjectsFromRoot(root string, skipDirs []string) ([]*model.ProjectCo if err != nil { return err } - projects = append(projects, &project) + + soloKitProjects = append(soloKitProjects, &project) return nil }); err != nil { return nil, err } - return projects, nil + return soloKitProjects, nil } func addDescriptorsForFile(addDescriptor func(f DescriptorWithPath), root, protoFile string, customImports, customGogoArgs []string, wantCompile func(string) bool) error { @@ -539,19 +589,44 @@ func importCustomResources(imports []string) ([]model.CustomResourceConfig, erro if err != nil { return nil, err } - var projectConfig model.ProjectConfig - err = json.Unmarshal(byt, &projectConfig) + + var soloKitProject model.SoloKitProject + err = json.Unmarshal(byt, &soloKitProject) if err != nil { return nil, err } - var customResources []model.CustomResourceConfig - for _, v := range projectConfig.CustomResources { - v.Package = projectConfig.GoPackage - v.Imported = true - customResources = append(customResources, v) + for _, ag := range soloKitProject.ApiGroups { + for _, vc := range ag.VersionConfigs { + var customResources []model.CustomResourceConfig + for _, cr := range vc.CustomResources { + cr.Package = ag.ResourceGroupGoPackage + cr.Imported = true + customResources = append(customResources, cr) + } + results = append(results, customResources...) + } } - results = append(results, customResources...) 
} return results, nil } + +func writeCodeFiles(code code_generator.Files, outDir string) error { + for _, file := range code { + path := filepath.Join(outDir, file.Filename) + if err := os.MkdirAll(filepath.Dir(path), 0777); err != nil { + return err + } + if err := ioutil.WriteFile(path, []byte(file.Content), 0644); err != nil { + return err + } + if out, err := exec.Command("gofmt", "-w", path).CombinedOutput(); err != nil { + return errors.Wrapf(err, "gofmt failed: %s", out) + } + + if out, err := exec.Command("goimports", "-w", path).CombinedOutput(); err != nil { + return errors.Wrapf(err, "goimports failed: %s", out) + } + } + return nil +} diff --git a/pkg/code-generator/codegen/conversion.go b/pkg/code-generator/codegen/conversion.go new file mode 100644 index 000000000..2041a4672 --- /dev/null +++ b/pkg/code-generator/codegen/conversion.go @@ -0,0 +1,165 @@ +package codegen + +import ( + "bytes" + "sort" + "text/template" + + "github.com/solo-io/go-utils/versionutils/kubeapi" + "github.com/solo-io/solo-kit/pkg/errors" + + "github.com/solo-io/go-utils/log" + code_generator "github.com/solo-io/solo-kit/pkg/code-generator" + "github.com/solo-io/solo-kit/pkg/code-generator/codegen/templates" + "github.com/solo-io/solo-kit/pkg/code-generator/model" +) + +func GenerateConversionFiles(soloKitProject *model.ApiGroup, projects []*model.Version) (code_generator.Files, error) { + var files code_generator.Files + + sort.SliceStable(projects, func(i, j int) bool { + vi, err := kubeapi.ParseVersion(projects[i].VersionConfig.Version) + if err != nil { + return false + } + vj, err := kubeapi.ParseVersion(projects[j].VersionConfig.Version) + if err != nil { + return false + } + return vi.LessThan(vj) + }) + + resourceNameToProjects := make(map[string][]*model.Version) + + for index, project := range projects { + for _, res := range project.Resources { + // only generate files for the resources in our group, otherwise we import + if 
!project.VersionConfig.IsOurProto(res.Filename) && !res.IsCustom {
+			log.Printf("not generating solo-kit "+
+				"clients for resource %v.%v, "+
+				"resource proto package must match project proto package %v", res.ProtoPackage, res.Name, project.ProtoPackage)
+			continue
+		} else if res.IsCustom && res.CustomResource.Imported {
+			log.Printf("not generating solo-kit "+
+				"clients for resource %v.%v, "+
+				"custom resources from a different project are not generated", res.GoPackage, res.Name)
+			continue
+		}
+
+		if _, found := resourceNameToProjects[res.Name]; !found {
+			resourceNameToProjects[res.Name] = make([]*model.Version, 0, len(projects)-index)
+		}
+		resourceNameToProjects[res.Name] = append(resourceNameToProjects[res.Name], project)
+	}
+}
+
+soloKitProject.Conversions = getConversionsFromResourceProjects(resourceNameToProjects)
+
+fs, err := generateFilesForConversionConfig(soloKitProject)
+if err != nil {
+	return nil, err
+}
+files = append(files, fs...)
+ + for i := range files { + files[i].Content = fileHeader + files[i].Content + } + + return files, nil +} + +func getConversionsFromResourceProjects(resNameToProjects map[string][]*model.Version) []*model.Conversion { + conversions := make([]*model.Conversion, 0, len(resNameToProjects)) + for resName, projects := range resNameToProjects { + if len(projects) < 2 { + continue + } + conversion := &model.Conversion{ + Name: resName, + Projects: getConversionProjects(projects), + } + conversions = append(conversions, conversion) + } + + // Sort conversions by name so reordering diffs aren't introduced to the conversion files + sort.SliceStable(conversions, func(i, j int) bool { return conversions[i].Name < conversions[j].Name }) + + return conversions +} + +func generateFilesForConversionConfig(soloKitProject *model.ApiGroup) (code_generator.Files, error) { + var v code_generator.Files + for name, tmpl := range map[string]*template.Template{ + "resource_converter.sk.go": templates.ConverterTemplate, + "resource_converter_test.go": templates.ConverterTestTemplate, + } { + content, err := generateConversionFile(soloKitProject, tmpl) + if err != nil { + return nil, errors.Wrapf(err, "internal error: processing template '%v' for resource list %v failed", tmpl.ParseName, name) + } + v = append(v, code_generator.File{ + Filename: name, + Content: content, + }) + } + + testSuite := &model.TestSuite{ + PackageName: soloKitProject.ConversionGoPackageShort, + } + for suffix, tmpl := range map[string]*template.Template{ + "_suite_test.go": templates.SimpleTestSuiteTemplate, + } { + name := testSuite.PackageName + suffix + content, err := generateTestSuiteFile(testSuite, tmpl) + if err != nil { + return nil, errors.Wrapf(err, "internal error: processing template '%v' for resource list %v failed", tmpl.ParseName, name) + } + v = append(v, code_generator.File{ + Filename: name, + Content: content, + }) + } + + return v, nil +} + +func generateConversionFile(soloKitProject 
*model.ApiGroup, tmpl *template.Template) (string, error) { + buf := &bytes.Buffer{} + if err := tmpl.Execute(buf, soloKitProject); err != nil { + return "", err + } + return buf.String(), nil +} + +func generateTestSuiteFile(suite *model.TestSuite, tmpl *template.Template) (string, error) { + buf := &bytes.Buffer{} + if err := tmpl.Execute(buf, suite); err != nil { + return "", err + } + return buf.String(), nil +} + +func getConversionProjects(projects []*model.Version) []*model.ConversionProject { + conversionProjects := make([]*model.ConversionProject, 0, len(projects)) + for index := range projects { + conversionProjects = append(conversionProjects, getConversionProject(index, projects)) + } + return conversionProjects +} + +func getConversionProject(index int, projects []*model.Version) *model.ConversionProject { + var nextVersion, previousVersion string + if index < len(projects)-1 { + nextVersion = projects[index+1].VersionConfig.Version + } + if index > 0 { + previousVersion = projects[index-1].VersionConfig.Version + } + + return &model.ConversionProject{ + Version: projects[index].VersionConfig.Version, + GoPackage: projects[index].VersionConfig.GoPackage, + NextVersion: nextVersion, + PreviousVersion: previousVersion, + } +} diff --git a/pkg/code-generator/codegen/generator.go b/pkg/code-generator/codegen/project.go similarity index 64% rename from pkg/code-generator/codegen/generator.go rename to pkg/code-generator/codegen/project.go index 01b572aba..fc2bbb892 100644 --- a/pkg/code-generator/codegen/generator.go +++ b/pkg/code-generator/codegen/project.go @@ -18,14 +18,15 @@ const fileHeader = `// Code generated by solo-kit. DO NOT EDIT. 
` -func GenerateFiles(project *model.Project, skipOutOfPackageFiles, skipGeneratedTests bool) (code_generator.Files, error) { +func GenerateProjectFiles(project *model.Version, skipOutOfPackageFiles, skipGeneratedTests bool) (code_generator.Files, error) { files, err := generateFilesForProject(project) if err != nil { return nil, err } + for _, res := range project.Resources { // only generate files for the resources in our group, otherwise we import - if !project.ProjectConfig.IsOurProto(res.Filename) && !res.IsCustom { + if !project.VersionConfig.IsOurProto(res.Filename) && !res.IsCustom { log.Printf("not generating solo-kit "+ "clients for resource %v.%v, "+ "resource proto package must match project proto package %v", res.ProtoPackage, res.Name, project.ProtoPackage) @@ -33,7 +34,7 @@ func GenerateFiles(project *model.Project, skipOutOfPackageFiles, skipGeneratedT } else if res.IsCustom && res.CustomResource.Imported { log.Printf("not generating solo-kit "+ "clients for resource %v.%v, "+ - "custom resources from a different project are not generated", res.GoPackage, res.Name, project.ProjectConfig.GoPackage) + "custom resources from a different project are not generated", res.GoPackage, res.Name, project.VersionConfig.GoPackage) continue } @@ -43,19 +44,9 @@ func GenerateFiles(project *model.Project, skipOutOfPackageFiles, skipGeneratedT } files = append(files, fs...) } - for _, grp := range project.ResourceGroups { - if skipOutOfPackageFiles && !(strings.HasSuffix(grp.Name, "."+project.ProtoPackage) || grp.Name == project.ProtoPackage) { - continue - } - fs, err := generateFilesForResourceGroup(grp) - if err != nil { - return nil, err - } - files = append(files, fs...) 
- } for _, res := range project.XDSResources { - if skipOutOfPackageFiles && !project.ProjectConfig.IsOurProto(res.Filename) { + if skipOutOfPackageFiles && !project.VersionConfig.IsOurProto(res.Filename) { continue } fs, err := generateFilesForXdsResource(res) @@ -64,6 +55,7 @@ func GenerateFiles(project *model.Project, skipOutOfPackageFiles, skipGeneratedT } files = append(files, fs...) } + for i := range files { files[i].Content = fileHeader + files[i].Content } @@ -117,40 +109,17 @@ func generateFilesForResource(resource *model.Resource) (code_generator.Files, e return v, nil } -func generateFilesForResourceGroup(rg *model.ResourceGroup) (code_generator.Files, error) { - var v code_generator.Files - for suffix, tmpl := range map[string]*template.Template{ - "_snapshot.sk.go": templates.ResourceGroupSnapshotTemplate, - "_snapshot_simple_emitter.sk.go": templates.SimpleEmitterTemplate, - "_snapshot_emitter.sk.go": templates.ResourceGroupEmitterTemplate, - "_snapshot_emitter_test.go": templates.ResourceGroupEmitterTestTemplate, - "_event_loop.sk.go": templates.ResourceGroupEventLoopTemplate, - "_simple_event_loop.sk.go": templates.SimpleEventLoopTemplate, - "_event_loop_test.go": templates.ResourceGroupEventLoopTestTemplate, - } { - content, err := generateResourceGroupFile(rg, tmpl) - if err != nil { - return nil, errors.Wrapf(err, "internal error: processing %template '%v' for resource group %v failed", tmpl.ParseName, rg.Name) - } - v = append(v, code_generator.File{ - Filename: strcase.ToSnake(rg.GoName) + suffix, - Content: content, - }) - } - return v, nil -} - -func generateFilesForProject(project *model.Project) (code_generator.Files, error) { +func generateFilesForProject(project *model.Version) (code_generator.Files, error) { var v code_generator.Files for suffix, tmpl := range map[string]*template.Template{ "_suite_test.go": templates.ProjectTestSuiteTemplate, } { content, err := generateProjectFile(project, tmpl) if err != nil { - return nil, 
errors.Wrapf(err, "internal error: processing template '%v' for project %v failed", tmpl.ParseName, project.ProjectConfig.Name) + return nil, errors.Wrapf(err, "internal error: processing template '%v' for apigroup %v failed", tmpl.ParseName, project.VersionConfig.ApiGroup.Name) } v = append(v, code_generator.File{ - Filename: strcase.ToSnake(project.ProjectConfig.Name) + suffix, + Filename: strcase.ToSnake(project.VersionConfig.ApiGroup.Name) + suffix, Content: content, }) } @@ -173,15 +142,7 @@ func generateResourceFile(resource *model.Resource, tmpl *template.Template) (st return buf.String(), nil } -func generateResourceGroupFile(rg *model.ResourceGroup, tmpl *template.Template) (string, error) { - buf := &bytes.Buffer{} - if err := tmpl.Execute(buf, rg); err != nil { - return "", err - } - return buf.String(), nil -} - -func generateProjectFile(project *model.Project, tmpl *template.Template) (string, error) { +func generateProjectFile(project *model.Version, tmpl *template.Template) (string, error) { buf := &bytes.Buffer{} if err := tmpl.Execute(buf, project); err != nil { return "", err diff --git a/pkg/code-generator/codegen/resource_group.go b/pkg/code-generator/codegen/resource_group.go new file mode 100644 index 000000000..2e0b3f75f --- /dev/null +++ b/pkg/code-generator/codegen/resource_group.go @@ -0,0 +1,76 @@ +package codegen + +import ( + "bytes" + "strings" + "text/template" + + "github.com/solo-io/solo-kit/pkg/errors" + + "github.com/iancoleman/strcase" + code_generator "github.com/solo-io/solo-kit/pkg/code-generator" + "github.com/solo-io/solo-kit/pkg/code-generator/codegen/templates" + "github.com/solo-io/solo-kit/pkg/code-generator/model" +) + +func GenerateResourceGroupFiles(apiGroup *model.ApiGroup, skipOutOfPackageFiles, skipGeneratedTests bool) (code_generator.Files, error) { + var files code_generator.Files + + for _, grp := range apiGroup.ResourceGroupsFoo { + // TODO joekelley this check probably doesn't make sense + if 
skipOutOfPackageFiles && !(strings.HasSuffix(grp.Name, "."+apiGroup.Name) || grp.Name == apiGroup.Name) {
+			continue
+		}
+		fs, err := generateFilesForResourceGroup(grp)
+		if err != nil {
+			return nil, err
+		}
+		files = append(files, fs...)
+	}
+
+	for i := range files {
+		files[i].Content = fileHeader + files[i].Content
+	}
+	if skipGeneratedTests {
+		var filesWithoutTests code_generator.Files
+		for _, file := range files {
+			if strings.HasSuffix(file.Filename, "_test.go") {
+				continue
+			}
+			filesWithoutTests = append(filesWithoutTests, file)
+		}
+		files = filesWithoutTests
+	}
+	return files, nil
+}
+
+func generateFilesForResourceGroup(rg *model.ResourceGroup) (code_generator.Files, error) {
+	var v code_generator.Files
+	for suffix, tmpl := range map[string]*template.Template{
+		"_snapshot.sk.go":                templates.ResourceGroupSnapshotTemplate,
+		"_snapshot_simple_emitter.sk.go": templates.SimpleEmitterTemplate,
+		"_snapshot_emitter.sk.go":        templates.ResourceGroupEmitterTemplate,
+		"_snapshot_emitter_test.go":      templates.ResourceGroupEmitterTestTemplate,
+		"_event_loop.sk.go":              templates.ResourceGroupEventLoopTemplate,
+		"_simple_event_loop.sk.go":       templates.SimpleEventLoopTemplate,
+		"_event_loop_test.go":            templates.ResourceGroupEventLoopTestTemplate,
+	} {
+		content, err := generateResourceGroupFile(rg, tmpl)
+		if err != nil {
+			return nil, errors.Wrapf(err, "internal error: processing template '%v' for resource group %v failed", tmpl.ParseName, rg.Name)
+		}
+		v = append(v, code_generator.File{
+			Filename: strcase.ToSnake(rg.GoName) + suffix,
+			Content:  content,
+		})
+	}
+	return v, nil
+}
+
+func generateResourceGroupFile(rg *model.ResourceGroup, tmpl *template.Template) (string, error) {
+	buf := &bytes.Buffer{}
+	if err := tmpl.Execute(buf, rg); err != nil {
+		return "", err
+	}
+	return buf.String(), nil
+}
diff --git a/pkg/code-generator/codegen/templates/converter_template.go b/pkg/code-generator/codegen/templates/converter_template.go
new file mode 100644
index
000000000..943ca2cea --- /dev/null +++ b/pkg/code-generator/codegen/templates/converter_template.go @@ -0,0 +1,104 @@ +package templates + +import ( + "text/template" +) + +var ConverterTemplate = template.Must(template.New("converter").Funcs(Funcs).Parse(`package {{ .ConversionGoPackageShort }} + +import ( + "errors" + + "github.com/solo-io/go-utils/versionutils/kubeapi" + "github.com/solo-io/solo-kit/pkg/api/v1/clients/kube/crd" + + {{- range .Conversions }} + {{- range .Projects }} + {{ .Version }} "{{ .GoPackage }}" + {{- end }} + {{- end }} +) + +{{- range .Conversions }} +{{ $resourceName := .Name }} + +type {{ upper_camel $resourceName }}UpConverter interface { + {{- range .Projects }} + {{- if .NextVersion }} + From{{ upper_camel .Version }}To{{ upper_camel .NextVersion }}(src *{{ .Version }}.{{ upper_camel $resourceName }}) *{{ .NextVersion }}.{{ upper_camel $resourceName }} + {{- end }} + {{- end }} +} + +type {{ upper_camel $resourceName }}DownConverter interface { + {{- range .Projects }} + {{- if .PreviousVersion }} + From{{ upper_camel .Version }}To{{ upper_camel .PreviousVersion }}(src *{{ .Version }}.{{ upper_camel $resourceName }}) *{{ .PreviousVersion }}.{{ upper_camel $resourceName }} + {{- end }} + {{- end }} +} + +type {{ lower_camel $resourceName }}Converter struct { + upConverter {{ upper_camel $resourceName }}UpConverter + downConverter {{ upper_camel $resourceName }}DownConverter +} + +func New{{ upper_camel $resourceName }}Converter(u {{ upper_camel $resourceName }}UpConverter, d {{ upper_camel $resourceName }}DownConverter) crd.Converter { + return &{{ lower_camel $resourceName }}Converter{ + upConverter: u, + downConverter: d, + } +} + +func (c *{{ lower_camel $resourceName }}Converter) Convert(src, dst crd.SoloKitCrd) error { + srcVersion, err := kubeapi.ParseVersion(src.GetObjectKind().GroupVersionKind().Version) + if err != nil { + return err + } + dstVersion, err := kubeapi.ParseVersion(dst.GetObjectKind().GroupVersionKind().Version) 
+ if err != nil { + return err + } + + if srcVersion.GreaterThan(dstVersion) { + return c.convertDown(src, dst) + } else if srcVersion.LessThan(dstVersion) { + return c.convertUp(src, dst) + } + return crd.Copy(src, dst) +} + +func (c *{{ lower_camel $resourceName }}Converter) convertUp(src, dst crd.SoloKitCrd) error { + if src.GetObjectKind().GroupVersionKind().Version == dst.GetObjectKind().GroupVersionKind().Version { + return crd.Copy(src, dst) + } + + switch t := src.(type) { + {{- range .Projects }} + {{- if .NextVersion }} + case *{{ .Version }}.{{ upper_camel $resourceName }}: + return c.convertUp(c.upConverter.From{{ upper_camel .Version }}To{{ upper_camel .NextVersion }}(t), dst) + {{- end }} + {{- end }} + } + return errors.New("unrecognized source type, this should never happen") +} + +func (c *{{ lower_camel $resourceName }}Converter) convertDown(src, dst crd.SoloKitCrd) error { + if src.GetObjectKind().GroupVersionKind().Version == dst.GetObjectKind().GroupVersionKind().Version { + return crd.Copy(src, dst) + } + + switch t := src.(type) { + {{- range .Projects }} + {{- if .PreviousVersion }} + case *{{ .Version }}.{{ upper_camel $resourceName }}: + return c.convertDown(c.downConverter.From{{ upper_camel .Version }}To{{ upper_camel .PreviousVersion }}(t), dst) + {{- end }} + {{- end }} + } + return errors.New("unrecognized source type, this should never happen") +} + +{{- end }} +`)) diff --git a/pkg/code-generator/codegen/templates/converter_test_template.go b/pkg/code-generator/codegen/templates/converter_test_template.go new file mode 100644 index 000000000..b2f93cf79 --- /dev/null +++ b/pkg/code-generator/codegen/templates/converter_test_template.go @@ -0,0 +1,81 @@ +package templates + +import ( + "text/template" +) + +var ConverterTestTemplate = template.Must(template.New("converter_test").Funcs(Funcs).Parse(`package {{ .ConversionGoPackageShort }}_test + +{{ $short_package := .ConversionGoPackageShort }} + +import ( + . 
"github.com/onsi/ginkgo" + . "github.com/onsi/gomega" + "github.com/solo-io/solo-kit/pkg/api/v1/clients/kube/crd" + "github.com/solo-io/solo-kit/pkg/api/v1/resources/core" + "{{ .ConversionGoPackage }}" + + {{- range .Conversions }} + {{- range .Projects }} + {{ .Version }} "{{ .GoPackage }}" + {{- end }} + {{- end }} +) + +var converter crd.Converter + +{{- range .Conversions }} +{{ $resource_name := .Name }} + +var _ = Describe("{{ upper_camel $resource_name }}Converter", func() { + BeforeEach(func() { + converter = {{ $short_package }}.New{{ upper_camel $resource_name }}Converter({{ lower_camel $resource_name }}UpConverter{}, {{ lower_camel $resource_name }}DownConverter{}) + }) + + Describe("Convert", func() { + It("works for noop conversions", func() { + src := &{{ (index .Projects 0).Version }}.{{ upper_camel $resource_name }}{Metadata: core.Metadata{Name: "test"}} + dst := &{{ (index .Projects 0).Version }}.{{ upper_camel $resource_name }}{} + err := converter.Convert(src, dst) + Expect(err).NotTo(HaveOccurred()) + Expect(dst.GetMetadata().Name).To(Equal("test")) + }) + + It("converts all the way up", func() { + src := &{{ (index .Projects 0).Version }}.{{ upper_camel $resource_name }}{} + dst := &{{ (index .Projects (add_int (len .Projects) -1)).Version }}.{{ upper_camel $resource_name }}{} + err := converter.Convert(src, dst) + Expect(err).NotTo(HaveOccurred()) + Expect(dst.GetMetadata().Name).To(Equal("{{ (index .Projects (add_int (len .Projects) -1)).Version }}")) + }) + + It("converts all the way down", func() { + src := &{{ (index .Projects (add_int (len .Projects) -1)).Version }}.{{ upper_camel $resource_name }}{} + dst := &{{ (index .Projects 0).Version }}.{{ upper_camel $resource_name }}{} + err := converter.Convert(src, dst) + Expect(err).NotTo(HaveOccurred()) + Expect(dst.GetMetadata().Name).To(Equal("{{ (index .Projects 0).Version }}")) + }) + }) +}) + +type {{ lower_camel $resource_name }}UpConverter struct{} +{{- range .Projects }} +{{- if 
.NextVersion }} +func ({{ lower_camel $resource_name }}UpConverter) From{{ upper_camel .Version }}To{{ upper_camel .NextVersion }}(src *{{ .Version }}.{{ upper_camel $resource_name }}) *{{ .NextVersion }}.{{ upper_camel $resource_name }} { + return &{{ .NextVersion }}.{{ upper_camel $resource_name }}{Metadata: core.Metadata{Name: "{{ .NextVersion }}"}} +} +{{- end }} +{{- end }} + +type {{ lower_camel $resource_name }}DownConverter struct{} +{{- range .Projects }} +{{- if .PreviousVersion }} +func ({{ lower_camel $resource_name }}DownConverter) From{{ upper_camel .Version }}To{{ upper_camel .PreviousVersion }}(src *{{ .Version }}.{{ upper_camel $resource_name }}) *{{ .PreviousVersion }}.{{ upper_camel $resource_name }} { + return &{{ .PreviousVersion }}.{{ upper_camel $resource_name }}{Metadata: core.Metadata{Name: "{{ .PreviousVersion }}"}} +} +{{- end }} +{{- end }} + +{{- end }} +`)) diff --git a/pkg/code-generator/codegen/templates/event_loop_template.go b/pkg/code-generator/codegen/templates/event_loop_template.go index ce4067fa3..141e73083 100644 --- a/pkg/code-generator/codegen/templates/event_loop_template.go +++ b/pkg/code-generator/codegen/templates/event_loop_template.go @@ -4,7 +4,7 @@ import ( "text/template" ) -var ResourceGroupEventLoopTemplate = template.Must(template.New("resource_group_event_loop").Funcs(Funcs).Parse(`package {{ .Project.ProjectConfig.Version }} +var ResourceGroupEventLoopTemplate = template.Must(template.New("resource_group_event_loop").Funcs(Funcs).Parse(`package {{ .ApiGroup.ResourceGroupGoPackageShort }} import ( "context" @@ -50,7 +50,7 @@ func New{{ .GoName }}EventLoop(emitter {{ .GoName }}Emitter, syncer {{ .GoName } func (el *{{ lower_camel .GoName }}EventLoop) Run(namespaces []string, opts clients.WatchOpts) (<-chan error, error) { opts = opts.WithDefaults() - opts.Ctx = contextutils.WithLogger(opts.Ctx, "{{ .Project.ProjectConfig.Version }}.event_loop") + opts.Ctx = contextutils.WithLogger(opts.Ctx, "{{ 
.ApiGroup.ResourceGroupGoPackageShort }}.event_loop") logger := contextutils.LoggerFrom(opts.Ctx) logger.Infof("event loop started") @@ -60,7 +60,7 @@ func (el *{{ lower_camel .GoName }}EventLoop) Run(namespaces []string, opts clie if err != nil { return nil, errors.Wrapf(err, "starting snapshot watch") } - go errutils.AggregateErrs(opts.Ctx, errs, emitterErrs, "{{ .Project.ProjectConfig.Version }}.emitter errors") + go errutils.AggregateErrs(opts.Ctx, errs, emitterErrs, "{{ .ApiGroup.ResourceGroupGoPackageShort }}.emitter errors") go func() { // create a new context for each loop, cancel it before each loop var cancel context.CancelFunc = func() {} diff --git a/pkg/code-generator/codegen/templates/event_loop_test_template.go b/pkg/code-generator/codegen/templates/event_loop_test_template.go index f3cbbe967..6b4c29b51 100644 --- a/pkg/code-generator/codegen/templates/event_loop_test_template.go +++ b/pkg/code-generator/codegen/templates/event_loop_test_template.go @@ -6,7 +6,7 @@ import ( var ResourceGroupEventLoopTestTemplate = template.Must(template.New("resource_group_event_loop_test").Funcs(Funcs).Parse(`// +build solokit -package {{ .Project.ProjectConfig.Version }} +package {{ .ApiGroup.ResourceGroupGoPackageShort }} {{- $clients := new_str_slice }} {{- range .Resources}} diff --git a/pkg/code-generator/codegen/templates/funcs.go b/pkg/code-generator/codegen/templates/funcs.go index 0285b013a..c2b38941a 100644 --- a/pkg/code-generator/codegen/templates/funcs.go +++ b/pkg/code-generator/codegen/templates/funcs.go @@ -82,6 +82,9 @@ var Funcs = template.FuncMap{ } return result }, + "add_int": func(a, b int) int { + return a + b + }, } func printPointer(format string, p *string) string { diff --git a/pkg/code-generator/codegen/templates/resource_client_template.go b/pkg/code-generator/codegen/templates/resource_client_template.go index 79be1d6df..7b2b4acbf 100644 --- a/pkg/code-generator/codegen/templates/resource_client_template.go +++ 
b/pkg/code-generator/codegen/templates/resource_client_template.go @@ -4,7 +4,7 @@ import ( "text/template" ) -var ResourceClientTemplate = template.Must(template.New("resource_reconciler").Funcs(Funcs).Parse(`package {{ .Project.ProjectConfig.Version }} +var ResourceClientTemplate = template.Must(template.New("resource_client").Funcs(Funcs).Parse(`package {{ .Project.VersionConfig.Version }} import ( "github.com/solo-io/solo-kit/pkg/api/v1/clients" diff --git a/pkg/code-generator/codegen/templates/resource_client_test_template.go b/pkg/code-generator/codegen/templates/resource_client_test_template.go index 0b684fe7c..cf294ef71 100644 --- a/pkg/code-generator/codegen/templates/resource_client_test_template.go +++ b/pkg/code-generator/codegen/templates/resource_client_test_template.go @@ -6,7 +6,7 @@ import ( var ResourceClientTestTemplate = template.Must(template.New("resource_client_test").Funcs(Funcs).Parse(`// +build solokit -package {{ .Project.ProjectConfig.Version }} +package {{ .Project.VersionConfig.Version }} import ( "time" diff --git a/pkg/code-generator/codegen/templates/resource_reconciler_template.go b/pkg/code-generator/codegen/templates/resource_reconciler_template.go index de754b31b..9f6995a4c 100644 --- a/pkg/code-generator/codegen/templates/resource_reconciler_template.go +++ b/pkg/code-generator/codegen/templates/resource_reconciler_template.go @@ -4,7 +4,7 @@ import ( "text/template" ) -var ResourceReconcilerTemplate = template.Must(template.New("resource_client").Funcs(Funcs).Parse(`package {{ .Project.ProjectConfig.Version }} +var ResourceReconcilerTemplate = template.Must(template.New("resource_reconciler").Funcs(Funcs).Parse(`package {{ .Project.VersionConfig.Version }} import ( "github.com/solo-io/solo-kit/pkg/api/v1/clients" "github.com/solo-io/solo-kit/pkg/api/v1/reconcile" diff --git a/pkg/code-generator/codegen/templates/resource_template.go b/pkg/code-generator/codegen/templates/resource_template.go index 34ad27aac..a276b1c78 100644 
--- a/pkg/code-generator/codegen/templates/resource_template.go +++ b/pkg/code-generator/codegen/templates/resource_template.go @@ -4,7 +4,7 @@ import ( "text/template" ) -var ResourceTemplate = template.Must(template.New("resource").Funcs(Funcs).Parse(`package {{ .Project.ProjectConfig.Version }} +var ResourceTemplate = template.Must(template.New("resource").Funcs(Funcs).Parse(`package {{ .Project.VersionConfig.Version }} import ( "sort" @@ -196,13 +196,14 @@ func (o *{{ .Name }}) DeepCopyObject() runtime.Object { } {{- $crdGroupName := .Project.ProtoPackage }} -{{- if ne .Project.ProjectConfig.CrdGroupOverride "" }} -{{- $crdGroupName = .Project.ProjectConfig.CrdGroupOverride }} +{{- if ne .Project.VersionConfig.ApiGroup.CrdGroupOverride "" }} +{{- $crdGroupName = .Project.VersionConfig.ApiGroup.CrdGroupOverride }} {{- end}} + var ( {{ .Name }}GVK = schema.GroupVersionKind{ - Version: "{{ .Project.ProjectConfig.Version }}", + Version: "{{ .Project.VersionConfig.Version }}", Group: "{{ $crdGroupName }}", Kind: "{{ .Name }}", } diff --git a/pkg/code-generator/codegen/templates/simple_event_loop_template.go b/pkg/code-generator/codegen/templates/simple_event_loop_template.go index 038ab8995..1d8b5b709 100644 --- a/pkg/code-generator/codegen/templates/simple_event_loop_template.go +++ b/pkg/code-generator/codegen/templates/simple_event_loop_template.go @@ -4,7 +4,7 @@ import ( "text/template" ) -var SimpleEventLoopTemplate = template.Must(template.New("simple_event_loop").Funcs(Funcs).Parse(`package {{ .Project.ProjectConfig.Version }} +var SimpleEventLoopTemplate = template.Must(template.New("simple_event_loop").Funcs(Funcs).Parse(`package {{ .ApiGroup.ResourceGroupGoPackageShort }} import ( "context" @@ -49,7 +49,7 @@ func New{{ .GoName }}SimpleEventLoop(emitter {{ .GoName }}SimpleEmitter, syncers } func (el *{{ lower_camel .GoName }}SimpleEventLoop) Run(ctx context.Context) (<-chan error, error) { - ctx = contextutils.WithLogger(ctx, "{{ 
.Project.ProjectConfig.Version }}.event_loop") + ctx = contextutils.WithLogger(ctx, "{{ .ApiGroup.ResourceGroupGoPackageShort }}.event_loop") logger := contextutils.LoggerFrom(ctx) logger.Infof("event loop started") @@ -61,7 +61,7 @@ func (el *{{ lower_camel .GoName }}SimpleEventLoop) Run(ctx context.Context) (<- } - go errutils.AggregateErrs(ctx, errs, emitterErrs, "{{ .Project.ProjectConfig.Version }}.emitter errors") + go errutils.AggregateErrs(ctx, errs, emitterErrs, "{{ .ApiGroup.ResourceGroupGoPackageShort }}.emitter errors") go func() { // create a new context for each syncer for each loop, cancel each before each loop syncerCancels := make(map[{{ .GoName }}Syncer]context.CancelFunc) diff --git a/pkg/code-generator/codegen/templates/simple_test_suite_template.go b/pkg/code-generator/codegen/templates/simple_test_suite_template.go new file mode 100644 index 000000000..31504f7a1 --- /dev/null +++ b/pkg/code-generator/codegen/templates/simple_test_suite_template.go @@ -0,0 +1,21 @@ +package templates + +import ( + "text/template" +) + +var SimpleTestSuiteTemplate = template.Must(template.New("project_template").Funcs(Funcs).Parse(`package {{ .PackageName }}_test + +import ( + "testing" + + . "github.com/onsi/ginkgo" + . 
"github.com/onsi/gomega" +) + +func Test{{ upper_camel .PackageName }}(t *testing.T) { + RegisterFailHandler(Fail) + RunSpecs(t, "{{ upper_camel .PackageName }} Suite") +} + +`)) diff --git a/pkg/code-generator/codegen/templates/snapshot_emitter_template.go b/pkg/code-generator/codegen/templates/snapshot_emitter_template.go index 900e29976..ecfb4e908 100644 --- a/pkg/code-generator/codegen/templates/snapshot_emitter_template.go +++ b/pkg/code-generator/codegen/templates/snapshot_emitter_template.go @@ -5,7 +5,7 @@ import ( ) var ResourceGroupEmitterTemplate = template.Must(template.New("resource_group_emitter").Funcs(Funcs).Parse( - `package {{ .Project.ProjectConfig.Version }} + `package {{ .ApiGroup.ResourceGroupGoPackageShort }} {{- $client_declarations := new_str_slice }} {{- $clients := new_str_slice }} diff --git a/pkg/code-generator/codegen/templates/snapshot_emitter_test_template.go b/pkg/code-generator/codegen/templates/snapshot_emitter_test_template.go index 390005ab8..8968d7b36 100644 --- a/pkg/code-generator/codegen/templates/snapshot_emitter_test_template.go +++ b/pkg/code-generator/codegen/templates/snapshot_emitter_test_template.go @@ -6,7 +6,7 @@ import ( var ResourceGroupEmitterTestTemplate = template.Must(template.New("resource_group_emitter_test").Funcs(Funcs).Parse(`// +build solokit -package {{ .Project.ProjectConfig.Version }} +package {{ .ApiGroup.ResourceGroupGoPackageShort }} {{- /* we need to know if the tests require a crd client or a regular clientset */ -}} {{- $clients := new_str_slice }} @@ -46,7 +46,7 @@ import ( _ "k8s.io/client-go/plugin/pkg/client/auth/gcp" ) -var _ = Describe("{{ upper_camel .Project.ProjectConfig.Version }}Emitter", func() { +var _ = Describe("{{ upper_camel .ApiGroup.ResourceGroupGoPackageShort }}Emitter", func() { if os.Getenv("RUN_KUBE_TESTS") != "1" { log.Printf("This test creates kubernetes resources and is disabled by default. 
To enable, set RUN_KUBE_TESTS=1 in your env.") return diff --git a/pkg/code-generator/codegen/templates/snapshot_simple_emitter_template.go b/pkg/code-generator/codegen/templates/snapshot_simple_emitter_template.go index ec1effbaf..ecf1537c5 100644 --- a/pkg/code-generator/codegen/templates/snapshot_simple_emitter_template.go +++ b/pkg/code-generator/codegen/templates/snapshot_simple_emitter_template.go @@ -5,7 +5,7 @@ import ( ) var SimpleEmitterTemplate = template.Must(template.New("resource_group_emitter").Funcs(Funcs).Parse( - `package {{ .Project.ProjectConfig.Version }} + `package {{ .ApiGroup.ResourceGroupGoPackageShort }} import ( "context" diff --git a/pkg/code-generator/codegen/templates/snapshot_template.go b/pkg/code-generator/codegen/templates/snapshot_template.go index 7e8ffc565..c731be298 100644 --- a/pkg/code-generator/codegen/templates/snapshot_template.go +++ b/pkg/code-generator/codegen/templates/snapshot_template.go @@ -5,7 +5,7 @@ import ( ) var ResourceGroupSnapshotTemplate = template.Must(template.New("resource_group_snapshot").Funcs(Funcs).Parse( - `package {{ .Project.ProjectConfig.Version }} + `package {{ .ApiGroup.ResourceGroupGoPackageShort }} import ( "fmt" diff --git a/pkg/code-generator/codegen/templates/test_suite_template.go b/pkg/code-generator/codegen/templates/test_suite_template.go index f261b2242..142c1c6fc 100644 --- a/pkg/code-generator/codegen/templates/test_suite_template.go +++ b/pkg/code-generator/codegen/templates/test_suite_template.go @@ -4,7 +4,7 @@ import ( "text/template" ) -var ProjectTestSuiteTemplate = template.Must(template.New("project_template").Funcs(Funcs).Parse(`package {{ .ProjectConfig.Version }} +var ProjectTestSuiteTemplate = template.Must(template.New("project_test_suite_template").Funcs(Funcs).Parse(`package {{ .VersionConfig.Version }} {{- $uniqueCrds := new_str_slice }} {{- range .Resources}} @@ -27,9 +27,9 @@ import ( metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" ) -func Test{{ upper_camel 
.ProjectConfig.Name }}(t *testing.T) { +func Test{{ upper_camel .VersionConfig.ApiGroup.Name }}(t *testing.T) { RegisterFailHandler(Fail) - RunSpecs(t, "{{ upper_camel .ProjectConfig.Name }} Suite") + RunSpecs(t, "{{ upper_camel .VersionConfig.ApiGroup.Name }} Suite") } diff --git a/pkg/code-generator/codegen/templates/xds_template.go b/pkg/code-generator/codegen/templates/xds_template.go index 058cadfaa..d6c584117 100644 --- a/pkg/code-generator/codegen/templates/xds_template.go +++ b/pkg/code-generator/codegen/templates/xds_template.go @@ -4,7 +4,7 @@ import ( "text/template" ) -var XdsTemplate = template.Must(template.New("xds_template").Funcs(Funcs).Parse(`package {{ .Project.ProjectConfig.Version }} +var XdsTemplate = template.Must(template.New("xds_template").Funcs(Funcs).Parse(`package {{ .Project.VersionConfig.Version }} import ( "context" diff --git a/pkg/code-generator/docgen/funcs/template_funcs.go b/pkg/code-generator/docgen/funcs/template_funcs.go index 7c5ce5e02..8c3839634 100644 --- a/pkg/code-generator/docgen/funcs/template_funcs.go +++ b/pkg/code-generator/docgen/funcs/template_funcs.go @@ -43,7 +43,7 @@ type templateFunctions struct { var magicCommentRegex = regexp.MustCompile("@solo-kit:.*") var githubProjectFileRegex = regexp.MustCompile(".*github.com/([^/]*)/([^/]*)/(.*)") -func TemplateFuncs(project *model.Project, docsOptions *options.DocsOptions) template.FuncMap { +func TemplateFuncs(project *model.Version, docsOptions *options.DocsOptions) template.FuncMap { funcs := &templateFunctions{} funcMap := template.FuncMap{ "join": strings.Join, @@ -180,7 +180,7 @@ func noEscape(s string) htmltemplate.HTML { return htmltemplate.HTML(s) } -func fieldType(project *model.Project) func(field *protokit.FieldDescriptor) (string, error) { +func fieldType(project *model.Version) func(field *protokit.FieldDescriptor) (string, error) { return func(field *protokit.FieldDescriptor) (string, error) { fieldTypeStr := func() string { switch field.GetType() { @@ 
-230,7 +230,7 @@ func wellKnownProtoLink(typeName string) string { return wellKnown } -func linkForField(project *model.Project, docsOptions *options.DocsOptions) func(forFile *protokit.FileDescriptor, field *protokit.FieldDescriptor) (string, error) { +func linkForField(project *model.Version, docsOptions *options.DocsOptions) func(forFile *protokit.FileDescriptor, field *protokit.FieldDescriptor) (string, error) { return func(forFile *protokit.FileDescriptor, field *protokit.FieldDescriptor) (string, error) { typeName, err := fieldType(project)(field) if err != nil { @@ -293,7 +293,7 @@ func linkForField(project *model.Project, docsOptions *options.DocsOptions) func } } -func linkForResource(project *model.Project, docsOptions *options.DocsOptions) func(resource *model.Resource) (string, error) { +func linkForResource(project *model.Version, docsOptions *options.DocsOptions) func(resource *model.Resource) (string, error) { protoFiles := protokit.ParseCodeGenRequest(project.Request) return func(resource *model.Resource) (string, error) { for _, file := range protoFiles { @@ -314,12 +314,12 @@ func linkForResource(project *model.Project, docsOptions *options.DocsOptions) f return fmt.Sprintf("[%v](%v%v%v#%v)", resource.Name, prefix, resource.Filename, ext, name), nil } } - return "", errors.Errorf("internal error: could not find file for resource %v in project %v", - resource.Filename, project.ProjectConfig.Name) + return "", errors.Errorf("internal error: could not find file for resource %v in apigroup %v", + resource.Filename, project.VersionConfig.ApiGroup.Name) } } -func resourceForMessage(project *model.Project) func(msg *protokit.Descriptor) (*model.Resource, error) { +func resourceForMessage(project *model.Version) func(msg *protokit.Descriptor) (*model.Resource, error) { return func(msg *protokit.Descriptor) (*model.Resource, error) { for _, res := range project.Resources { if res.SkipDocsGen { @@ -330,8 +330,8 @@ func resourceForMessage(project 
*model.Project) func(msg *protokit.Descriptor) ( } } return nil, nil - return nil, errors.Errorf("internal error: could not find file for resource for msg %v in project %v", - msg.GetName(), project.ProjectConfig.Name) + return nil, errors.Errorf("internal error: could not find file for resource for msg %v in apigroup %v", + msg.GetName(), project.VersionConfig.ApiGroup.Name) } } @@ -408,7 +408,7 @@ func commonPrefix(sep byte, paths ...string) string { return string(c) } -func getFileForField(project *model.Project, field *protokit.FieldDescriptor) (*descriptor.FileDescriptorProto, error) { +func getFileForField(project *model.Version, field *protokit.FieldDescriptor) (*descriptor.FileDescriptorProto, error) { parts := strings.Split(strings.TrimPrefix(field.GetTypeName(), "."), ".") if strings.HasSuffix(parts[len(parts)-1], "Entry") { parts = parts[:len(parts)-1] @@ -457,7 +457,7 @@ func splitTypeName(typeName string) (string, []string) { return packageName, parts[indexOfFirstUppercasePart:] } -func getFileAndTypeDefForField(project *model.Project, field *protokit.FieldDescriptor) (*descriptor.FileDescriptorProto, *descriptor.DescriptorProto, *descriptor.EnumDescriptorProto, error) { +func getFileAndTypeDefForField(project *model.Version, field *protokit.FieldDescriptor) (*descriptor.FileDescriptorProto, *descriptor.DescriptorProto, *descriptor.EnumDescriptorProto, error) { packageName, typeNameParts := splitTypeName(field.GetTypeName()) for _, protoFile := range project.Request.ProtoFile { if protoFile.GetPackage() == packageName { @@ -564,7 +564,7 @@ func (c *templateFunctions) forEachMessage(messagesToSkip map[string]bool) func( // Returns a map indicating which resources should be skipped during doc generation. // The keys are strings in the format .. 
-func getMessageSkippingInfo(project *model.Project) map[string]bool { +func getMessageSkippingInfo(project *model.Version) map[string]bool { // Build map for quick lookup of SkipDocsGen flag toSkip := make(map[string]bool) for _, resource := range project.Resources { diff --git a/pkg/code-generator/docgen/generator.go b/pkg/code-generator/docgen/generator.go index d6383caa2..412d35818 100644 --- a/pkg/code-generator/docgen/generator.go +++ b/pkg/code-generator/docgen/generator.go @@ -18,7 +18,7 @@ import ( type DocsGen struct { DocsOptions options.DocsOptions - Project *model.Project + Project *model.Version } // must ignore validate.proto from lyft @@ -89,7 +89,7 @@ func (d *DocsGen) GenerateFilesForProtoFiles(protoFiles []*protokit.FileDescript return v, nil } -func GenerateFiles(project *model.Project, docsOptions *options.DocsOptions) (code_generator.Files, error) { +func GenerateFiles(project *model.Version, docsOptions *options.DocsOptions) (code_generator.Files, error) { protoFiles := protokit.ParseCodeGenRequest(project.Request) if docsOptions == nil { docsOptions = &options.DocsOptions{} @@ -167,14 +167,14 @@ func (d *DocsGen) GenerateFilesForProject() (code_generator.Files, error) { return nil, err } v = append(v, code_generator.File{ - Filename: strcase.ToSnake(d.Project.ProjectConfig.Name) + suffix, + Filename: strcase.ToSnake(d.Project.VersionConfig.ApiGroup.Name) + suffix, Content: content, }) } return v, nil } -func generateProjectFile(project *model.Project, tmpl *template.Template) (string, error) { +func generateProjectFile(project *model.Version, tmpl *template.Template) (string, error) { buf := &bytes.Buffer{} if err := tmpl.Execute(buf, project); err != nil { return "", err diff --git a/pkg/code-generator/docgen/templates/markdown/project_template.go b/pkg/code-generator/docgen/templates/markdown/project_template.go index aec72bd63..f76156ae7 100644 --- a/pkg/code-generator/docgen/templates/markdown/project_template.go +++ 
b/pkg/code-generator/docgen/templates/markdown/project_template.go @@ -9,14 +9,14 @@ import ( "github.com/solo-io/solo-kit/pkg/code-generator/model" ) -func ProjectDocsRootTemplate(project *model.Project, docsOptions *options.DocsOptions) *template.Template { +func ProjectDocsRootTemplate(project *model.Version, docsOptions *options.DocsOptions) *template.Template { str := ` -### API Reference for {{ .ProjectConfig.Title}} +### API Reference for {{ .VersionConfig.ApiGroup.SoloKitProject.Title}} -API Version: ` + "`{{ .ProjectConfig.Name }}.{{ .ProjectConfig.Version }}`" + ` +API Version: ` + "`{{ .VersionConfig.ApiGroup.Name }}.{{ .VersionConfig.Version }}`" + ` -{{ .ProjectConfig.Description }} +{{ .VersionConfig.ApiGroup.SoloKitProject.Description }} ### API Resources: {{- range .Resources}} @@ -29,5 +29,5 @@ API Version: ` + "`{{ .ProjectConfig.Name }}.{{ .ProjectConfig.Version }}`" + ` ` - return template.Must(template.New("pf").Funcs(funcs.TemplateFuncs(project, docsOptions)).Parse(str)) + return template.Must(template.New("markdown_project").Funcs(funcs.TemplateFuncs(project, docsOptions)).Parse(str)) } diff --git a/pkg/code-generator/docgen/templates/markdown/proto_file_template.go b/pkg/code-generator/docgen/templates/markdown/proto_file_template.go index 8db65db44..3624e92be 100644 --- a/pkg/code-generator/docgen/templates/markdown/proto_file_template.go +++ b/pkg/code-generator/docgen/templates/markdown/proto_file_template.go @@ -8,7 +8,7 @@ import ( "github.com/solo-io/solo-kit/pkg/code-generator/model" ) -func ProtoFileTemplate(project *model.Project, docsOptions *options.DocsOptions) *template.Template { +func ProtoFileTemplate(project *model.Version, docsOptions *options.DocsOptions) *template.Template { str := ` {{ $File := . 
-}} diff --git a/pkg/code-generator/docgen/templates/restructured/project_template.go b/pkg/code-generator/docgen/templates/restructured/project_template.go index 3152f547b..2e9397827 100644 --- a/pkg/code-generator/docgen/templates/restructured/project_template.go +++ b/pkg/code-generator/docgen/templates/restructured/project_template.go @@ -9,16 +9,16 @@ import ( "github.com/solo-io/solo-kit/pkg/code-generator/model" ) -func ProjectDocsRootTemplate(project *model.Project, docsOptions *options.DocsOptions) *template.Template { - return template.Must(template.New("pf").Funcs(funcs.TemplateFuncs(project, docsOptions)).Parse(` +func ProjectDocsRootTemplate(project *model.Version, docsOptions *options.DocsOptions) *template.Template { + return template.Must(template.New("restructured_project").Funcs(funcs.TemplateFuncs(project, docsOptions)).Parse(` =========================================== -API Reference for {{ .ProjectConfig.Title}} +API Reference for {{ .VersionConfig.ApiGroup.SoloKitProject.Title }} =========================================== -.. _{{ .ProjectConfig.Title}}: +.. 
_{{ .VersionConfig.ApiGroup.SoloKitProject.Title }}: -API Version: ` + "`{{ .ProjectConfig.Name }}.{{ .ProjectConfig.Version }}`" + ` +API Version: ` + "`{{ .VersionConfig.ApiGroup.Name }}.{{ .VersionConfig.Version }}`" + ` -{{ .ProjectConfig.Description }} +{{ .VersionConfig.ApiGroup.SoloKitProject.Description }} API Resources: ~~~~~~~~~~~~~~ diff --git a/pkg/code-generator/docgen/templates/restructured/proto_file_template.go b/pkg/code-generator/docgen/templates/restructured/proto_file_template.go index 12333abd3..b7caa9be9 100644 --- a/pkg/code-generator/docgen/templates/restructured/proto_file_template.go +++ b/pkg/code-generator/docgen/templates/restructured/proto_file_template.go @@ -8,7 +8,7 @@ import ( "github.com/solo-io/solo-kit/pkg/code-generator/model" ) -func ProtoFileTemplate(project *model.Project, docsOptions *options.DocsOptions) *template.Template { +func ProtoFileTemplate(project *model.Version, docsOptions *options.DocsOptions) *template.Template { str := ` {{ $File := . 
-}} diff --git a/pkg/code-generator/model/conversion.go b/pkg/code-generator/model/conversion.go new file mode 100644 index 000000000..0bd6db5f2 --- /dev/null +++ b/pkg/code-generator/model/conversion.go @@ -0,0 +1,15 @@ +package model + +// SOLO-KIT Descriptors from which code can be generated + +type Conversion struct { + Name string + Projects []*ConversionProject +} + +type ConversionProject struct { + Version string + NextVersion string + PreviousVersion string + GoPackage string +} diff --git a/pkg/code-generator/model/project.go b/pkg/code-generator/model/project.go index a6476fba2..bd96d39c2 100644 --- a/pkg/code-generator/model/project.go +++ b/pkg/code-generator/model/project.go @@ -18,33 +18,63 @@ const ProjectConfigFilename = "solo-kit.json" // SOLO-KIT Descriptors from which code can be generated -type ProjectConfig struct { - Title string `json:"title"` - Description string `json:"description"` - Name string `json:"name"` - Version string `json:"version"` - DocsDir string `json:"docs_dir"` - ResourceGroups map[string][]ResourceConfig `json:"resource_groups"` - // if set, this group will override the proto pacakge typically used +type SoloKitProject struct { + Title string `json:"title"` + Description string `json:"description"` + ApiGroups []*ApiGroup `json:"api_groups"` + + // set by load + ProjectFile string +} + +type ApiGroup struct { + Name string `json:"name"` + DocsDir string `json:"docs_dir"` + VersionConfigs []*VersionConfig `json:"version_configs"` + ResourceGroups map[string][]ResourceConfig `json:"resource_groups"` + ResourceGroupGoPackage string `json:"resource_group_go_package"` + ConversionGoPackage string `json:"conversion_go_package"` + + // if set, this group will override the proto package typically used // as the api group for the crd CrdGroupOverride string `json:"crd_group_override"` - // imported solokit projects + // imported solokit projects, used for resource groups Imports []string `json:"imports"` + // set by load + 
SoloKitProject *SoloKitProject + Conversions []*Conversion + // TODO joekelley improve name + ResourceGroupsFoo []*ResourceGroup + ConversionGoPackageShort string + ResourceGroupGoPackageShort string +} + +func (a ApiGroup) IsOurProto(protoFile string) bool { + for _, vc := range a.VersionConfigs { + if vc.IsOurProto(protoFile) { + return true + } + } + return false +} + +type VersionConfig struct { + Version string `json:"version"` + // define custom resources here CustomResources []CustomResourceConfig `json:"custom_resources"` - // set by load if not specified GoPackage string `json:"go_package"` // set by load - ProjectFile string - ProjectProtos []string + ApiGroup *ApiGroup + VersionProtos []string } -func (p ProjectConfig) IsOurProto(protoFile string) bool { - for _, file := range p.ProjectProtos { +func (p VersionConfig) IsOurProto(protoFile string) bool { + for _, file := range p.VersionProtos { if protoFile == file { return true } @@ -55,7 +85,6 @@ func (p ProjectConfig) IsOurProto(protoFile string) bool { type ResourceConfig struct { ResourceName string `json:"name"` ResourcePackage string `json:"package"` // resource package doubles as the proto package or the go import package - ResourceVersion string `json:"version"` // version of the resource, used to distinguish when multiple versions of a resource exist } // Create a Solo-Kit backed resource from @@ -73,12 +102,11 @@ type CustomResourceConfig struct { Imported bool } -type Project struct { - ProjectConfig ProjectConfig - ProtoPackage string - Resources []*Resource - ResourceGroups []*ResourceGroup - XDSResources []*XDSResource +type Version struct { + VersionConfig VersionConfig + ProtoPackage string + Resources []*Resource + XDSResources []*XDSResource Request *plugin_go.CodeGeneratorRequest } @@ -108,7 +136,7 @@ type Resource struct { // resource groups i belong to ResourceGroups []*ResourceGroup // project i belong to - Project *Project + Project *Version Filename string // the proto file where 
this resource is contained Version string // set during parsing from this resource's solo-kit.json @@ -133,7 +161,7 @@ type ResourceGroup struct { Name string // eg. api.gloo.solo.io GoName string // will be Api Imports string // if this resource group contains any imports from other projects - Project *Project + ApiGroup *ApiGroup Resources []*Resource } @@ -143,40 +171,47 @@ type XDSResource struct { NameField string NoReferences bool - Project *Project + Project *Version ProtoPackage string // eg. gloo.solo.io Filename string // the proto file where this resource is contained } -func LoadProjectConfig(path string) (ProjectConfig, error) { +func LoadProjectConfig(path string) (SoloKitProject, error) { b, err := ioutil.ReadFile(path) if err != nil { - return ProjectConfig{}, err + return SoloKitProject{}, err } - var pc ProjectConfig - err = json.Unmarshal(b, &pc) + var skp SoloKitProject + err = json.Unmarshal(b, &skp) if err != nil { - return ProjectConfig{}, err + return SoloKitProject{}, err } - pc.ProjectFile = path - if pc.GoPackage == "" { - goPkg, err := detectGoPackageForProject(path) - if err != nil { - return ProjectConfig{}, err + + skp.ProjectFile = path + for _, ag := range skp.ApiGroups { + goPackageSegments := strings.Split(ag.ResourceGroupGoPackage, "/") + ag.ResourceGroupGoPackageShort = goPackageSegments[len(goPackageSegments)-1] + for _, vc := range ag.VersionConfigs { + if vc.GoPackage == "" { + goPkg, err := detectGoPackageForVersion(filepath.Dir(skp.ProjectFile) + "/" + vc.Version) + if err != nil { + return SoloKitProject{}, err + } + vc.GoPackage = goPkg + } } - pc.GoPackage = goPkg } - return pc, err + + return skp, err } var goPackageStatementRegex = regexp.MustCompile(`option go_package.*=.*"(.*)";`) -// Returns the value of the 'go_package' option of the first .proto file found in the same directory as projectFile -func detectGoPackageForProject(projectFile string) (string, error) { +// Returns the value of the 'go_package' option of 
the first .proto file found in the version's directory +func detectGoPackageForVersion(versionDir string) (string, error) { var goPkg string - projectDir := filepath.Dir(projectFile) - if err := filepath.Walk(projectDir, func(protoFile string, info os.FileInfo, err error) error { + if err := filepath.Walk(versionDir, func(protoFile string, info os.FileInfo, err error) error { // already set if goPkg != "" { return nil @@ -185,7 +220,7 @@ func detectGoPackageForProject(projectFile string) (string, error) { return nil } // search for go_package on protos in the same dir as the project.json - if projectDir != filepath.Dir(protoFile) { + if versionDir != filepath.Dir(protoFile) { return nil } content, err := ioutil.ReadFile(protoFile) @@ -209,7 +244,7 @@ func detectGoPackageForProject(projectFile string) (string, error) { return "", err } if goPkg == "" { - return "", errors.Errorf("no go_package statement found in root dir of project %v", projectFile) + return "", errors.Errorf("no go_package statement found in root dir of project %v", versionDir) } return goPkg, nil } diff --git a/pkg/code-generator/model/simple_test_suite.go b/pkg/code-generator/model/simple_test_suite.go new file mode 100644 index 000000000..adc1bffdc --- /dev/null +++ b/pkg/code-generator/model/simple_test_suite.go @@ -0,0 +1,5 @@ +package model + +type TestSuite struct { + PackageName string +} diff --git a/pkg/code-generator/parser/parser.go b/pkg/code-generator/parser/parser.go index 56660cb4e..ecfb2f5a5 100644 --- a/pkg/code-generator/parser/parser.go +++ b/pkg/code-generator/parser/parser.go @@ -13,7 +13,7 @@ import ( "github.com/solo-io/solo-kit/pkg/code-generator/model" ) -func ProcessDescriptors(projectConfig *model.ProjectConfig, allProjectConfigs []*model.ProjectConfig, descriptors []*descriptor.FileDescriptorProto) (*model.Project, error) { +func ProcessDescriptors(versionConfig *model.VersionConfig, apiGroup *model.ApiGroup, descriptors []*descriptor.FileDescriptorProto) 
(*model.Version, error) { req := &plugin_go.CodeGeneratorRequest{} for _, file := range descriptors { var added bool @@ -28,11 +28,11 @@ func ProcessDescriptors(projectConfig *model.ProjectConfig, allProjectConfigs [] req.FileToGenerate = append(req.FileToGenerate, file.GetName()) req.ProtoFile = append(req.ProtoFile, file) } - return parseRequest(projectConfig, allProjectConfigs, req) + return parseRequest(versionConfig, apiGroup, req) } -func parseRequest(projectConfig *model.ProjectConfig, allProjectConfigs []*model.ProjectConfig, req *plugin_go.CodeGeneratorRequest) (*model.Project, error) { - log.Printf("project config: %v", projectConfig) +func parseRequest(versionConfig *model.VersionConfig, apiGroup *model.ApiGroup, req *plugin_go.CodeGeneratorRequest) (*model.Version, error) { + log.Printf("version config: %v", versionConfig) descriptors := protokit.ParseCodeGenRequest(req) var messages []ProtoMessageWrapper @@ -54,26 +54,25 @@ func parseRequest(projectConfig *model.ProjectConfig, allProjectConfigs []*model services = append(services, file.GetServices()...) 
} - project := &model.Project{ - ProjectConfig: *projectConfig, - ProtoPackage: projectConfig.Name, + version := &model.Version{ + VersionConfig: *versionConfig, + ProtoPackage: versionConfig.ApiGroup.Name, Request: req, } - resources, resourceGroups, err := getResources(project, allProjectConfigs, messages) + resources, err := getResources(version, apiGroup, messages) if err != nil { return nil, err } - xdsResources, err := getXdsResources(project, messages, services) + xdsResources, err := getXdsResources(version, messages, services) if err != nil { return nil, err } - project.Resources = resources - project.ResourceGroups = resourceGroups - project.XDSResources = xdsResources + version.Resources = resources + version.XDSResources = xdsResources - return project, nil + return version, nil } func goName(n string) string { diff --git a/pkg/code-generator/parser/parser_resource.go b/pkg/code-generator/parser/parser_resource.go index 6a47bcd72..15e5bfa68 100644 --- a/pkg/code-generator/parser/parser_resource.go +++ b/pkg/code-generator/parser/parser_resource.go @@ -34,16 +34,9 @@ type ProtoMessageWrapper struct { Message *protokit.Descriptor } -func getResource(resources []*model.Resource, project model.ProjectConfig, cfg model.ResourceConfig) (*model.Resource, error) { +func getResource(resources []*model.Resource, cfg model.ResourceConfig) (*model.Resource, error) { matches := func(res *model.Resource) bool { - if res.Name == cfg.ResourceName && - (res.ProtoPackage == cfg.ResourcePackage || res.GoPackage == cfg.ResourcePackage) { - if cfg.ResourceVersion == "" { - return true - } - return cfg.ResourceVersion == res.Version - } - return false + return res.Name == cfg.ResourceName //&& (res.ProtoPackage == cfg.ResourcePackage || res.GoPackage == cfg.ResourcePackage) } // collect all resources that match on package and name @@ -59,40 +52,34 @@ func getResource(resources []*model.Resource, project model.ProjectConfig, cfg m case 0: return nil, errors.Errorf("getting 
resource: message %v not found", cfg) } - // default to using the version matching the project itself - // only works for this project's resources - for _, res := range possibleResources { - if res.GoPackage == project.GoPackage { - return res, nil - } - } - return nil, errors.Errorf("found %v resources found which match %v, try specifying a version", len(possibleResources), cfg) + + return possibleResources[0], nil } -func getResources(project *model.Project, allProjectConfigs []*model.ProjectConfig, messages []ProtoMessageWrapper) ([]*model.Resource, []*model.ResourceGroup, error) { +func getResources(version *model.Version, apiGroup *model.ApiGroup, messages []ProtoMessageWrapper) ([]*model.Resource, error) { var ( resources []*model.Resource ) for _, msg := range messages { resource, err := describeResource(msg) if err != nil { - return nil, nil, err + return nil, err } if resource == nil { // not a solo-kit resource, ignore continue } - for _, projectCfg := range allProjectConfigs { - if projectCfg.IsOurProto(resource.Filename) { - resource.Version = projectCfg.Version + for _, vc := range apiGroup.VersionConfigs { + if vc.IsOurProto(resource.Filename) { + resource.Version = vc.Version break } } - resource.Project = project + resource.Project = version resources = append(resources, resource) } - for _, custom := range project.ProjectConfig.CustomResources { + for _, custom := range version.VersionConfig.CustomResources { impPrefix := strings.Replace(custom.Package, "/", "_", -1) impPrefix = strings.Replace(impPrefix, ".", "_", -1) impPrefix = strings.Replace(impPrefix, "-", "_", -1) @@ -104,29 +91,33 @@ func getResources(project *model.Project, allProjectConfigs []*model.ProjectConf ClusterScoped: custom.ClusterScoped, CustomImportPrefix: impPrefix, SkipDocsGen: true, - Project: project, + Project: version, IsCustom: true, CustomResource: custom, }) } + return resources, nil +} + +func GetResourceGroups(apiGroup *model.ApiGroup, resources []*model.Resource) 
([]*model.ResourceGroup, error) { var ( resourceGroups []*model.ResourceGroup ) - for groupName, resourcesCfg := range project.ProjectConfig.ResourceGroups { + for groupName, resourcesCfg := range apiGroup.ResourceGroups { var resourcesForGroup []*model.Resource for _, resourceCfg := range resourcesCfg { - resource, err := getResource(resources, project.ProjectConfig, resourceCfg) + resource, err := getResource(resources, resourceCfg) if err != nil { - return nil, nil, err + return nil, err } var importPrefix string - if !project.ProjectConfig.IsOurProto(resource.Filename) && !resource.IsCustom { + if !apiGroup.IsOurProto(resource.Filename) && !resource.IsCustom { importPrefix = resource.ProtoPackage } else if resource.IsCustom && resource.CustomResource.Imported { - // If is custom resource from a different project use import prefix + // If is custom resource from a different version use import prefix importPrefix = resource.CustomImportPrefix } @@ -140,7 +131,7 @@ func getResources(project *model.Project, allProjectConfigs []*model.ProjectConf rg := &model.ResourceGroup{ Name: groupName, GoName: goName(groupName), - Project: project, + ApiGroup: apiGroup, Resources: resourcesForGroup, } for _, res := range resourcesForGroup { @@ -150,7 +141,7 @@ func getResources(project *model.Project, allProjectConfigs []*model.ProjectConf imports := make(map[string]string) for _, res := range rg.Resources { // only generate files for the resources in our group, otherwise we import - if res.GoPackage != rg.Project.ProjectConfig.GoPackage { + if res.GoPackage != rg.ApiGroup.ResourceGroupGoPackage { // add import imports[strings.TrimSuffix(res.ImportPrefix, ".")] = res.GoPackage } @@ -179,7 +170,7 @@ func getResources(project *model.Project, allProjectConfigs []*model.ProjectConf sort.SliceStable(resourceGroups, func(i, j int) bool { return resourceGroups[i].Name < resourceGroups[j].Name }) - return resources, resourceGroups, nil + return resourceGroups, nil } func 
describeResource(messageWrapper ProtoMessageWrapper) (*model.Resource, error) { diff --git a/pkg/code-generator/parser/parser_xds.go b/pkg/code-generator/parser/parser_xds.go index e5dcb1acc..883e3dc2a 100644 --- a/pkg/code-generator/parser/parser_xds.go +++ b/pkg/code-generator/parser/parser_xds.go @@ -32,7 +32,7 @@ type xdsMessage struct { fileName string } -func getXdsResources(project *model.Project, messages []ProtoMessageWrapper, services []*protokit.ServiceDescriptor) ([]*model.XDSResource, error) { +func getXdsResources(project *model.Version, messages []ProtoMessageWrapper, services []*protokit.ServiceDescriptor) ([]*model.XDSResource, error) { var msgs []*xdsMessage var svcs []*xdsService @@ -70,7 +70,7 @@ func getXdsResources(project *model.Project, messages []ProtoMessageWrapper, ser return processMessagesAndServices(project, msgs, svcs) } -func processMessagesAndServices(project *model.Project, msgs []*xdsMessage, svcs []*xdsService) ([]*model.XDSResource, error) { +func processMessagesAndServices(project *model.Version, msgs []*xdsMessage, svcs []*xdsService) ([]*model.XDSResource, error) { var resources []*model.XDSResource for _, svc := range svcs { var message *xdsMessage diff --git a/test/mocks/v1alpha1/testing_event_loop.sk.go b/pkg/multicluster/group/kubeconfigs_event_loop.sk.go similarity index 66% rename from test/mocks/v1alpha1/testing_event_loop.sk.go rename to pkg/multicluster/group/kubeconfigs_event_loop.sk.go index 6c8a4c22e..994b3229d 100644 --- a/test/mocks/v1alpha1/testing_event_loop.sk.go +++ b/pkg/multicluster/group/kubeconfigs_event_loop.sk.go @@ -1,6 +1,6 @@ // Code generated by solo-kit. DO NOT EDIT. 
-package v1alpha1 +package group import ( "context" @@ -16,13 +16,13 @@ import ( "github.com/solo-io/solo-kit/pkg/errors" ) -type TestingSyncer interface { - Sync(context.Context, *TestingSnapshot) error +type KubeconfigsSyncer interface { + Sync(context.Context, *KubeconfigsSnapshot) error } -type TestingSyncers []TestingSyncer +type KubeconfigsSyncers []KubeconfigsSyncer -func (s TestingSyncers) Sync(ctx context.Context, snapshot *TestingSnapshot) error { +func (s KubeconfigsSyncers) Sync(ctx context.Context, snapshot *KubeconfigsSnapshot) error { var multiErr *multierror.Error for _, syncer := range s { if err := syncer.Sync(ctx, snapshot); err != nil { @@ -32,21 +32,21 @@ func (s TestingSyncers) Sync(ctx context.Context, snapshot *TestingSnapshot) err return multiErr.ErrorOrNil() } -type testingEventLoop struct { - emitter TestingEmitter - syncer TestingSyncer +type kubeconfigsEventLoop struct { + emitter KubeconfigsEmitter + syncer KubeconfigsSyncer } -func NewTestingEventLoop(emitter TestingEmitter, syncer TestingSyncer) eventloop.EventLoop { - return &testingEventLoop{ +func NewKubeconfigsEventLoop(emitter KubeconfigsEmitter, syncer KubeconfigsSyncer) eventloop.EventLoop { + return &kubeconfigsEventLoop{ emitter: emitter, syncer: syncer, } } -func (el *testingEventLoop) Run(namespaces []string, opts clients.WatchOpts) (<-chan error, error) { +func (el *kubeconfigsEventLoop) Run(namespaces []string, opts clients.WatchOpts) (<-chan error, error) { opts = opts.WithDefaults() - opts.Ctx = contextutils.WithLogger(opts.Ctx, "v1alpha1.event_loop") + opts.Ctx = contextutils.WithLogger(opts.Ctx, "group.event_loop") logger := contextutils.LoggerFrom(opts.Ctx) logger.Infof("event loop started") @@ -56,7 +56,7 @@ func (el *testingEventLoop) Run(namespaces []string, opts clients.WatchOpts) (<- if err != nil { return nil, errors.Wrapf(err, "starting snapshot watch") } - go errutils.AggregateErrs(opts.Ctx, errs, emitterErrs, "v1alpha1.emitter errors") + go 
errutils.AggregateErrs(opts.Ctx, errs, emitterErrs, "group.emitter errors") go func() { // create a new context for each loop, cancel it before each loop var cancel context.CancelFunc = func() {} @@ -71,7 +71,7 @@ func (el *testingEventLoop) Run(namespaces []string, opts clients.WatchOpts) (<- // cancel any open watches from previous loop cancel() - ctx, span := trace.StartSpan(opts.Ctx, "testing.solo.io.EventLoopSync") + ctx, span := trace.StartSpan(opts.Ctx, "kubeconfigs.multicluster.solo.io.EventLoopSync") ctx, canc := context.WithCancel(ctx) cancel = canc err := el.syncer.Sync(ctx, snapshot) diff --git a/test/mocks/v1alpha1/testing_event_loop_test.go b/pkg/multicluster/group/kubeconfigs_event_loop_test.go similarity index 58% rename from test/mocks/v1alpha1/testing_event_loop_test.go rename to pkg/multicluster/group/kubeconfigs_event_loop_test.go index 0950ce3d6..1cc4f430b 100644 --- a/test/mocks/v1alpha1/testing_event_loop_test.go +++ b/pkg/multicluster/group/kubeconfigs_event_loop_test.go @@ -2,7 +2,7 @@ // +build solokit -package v1alpha1 +package group import ( "context" @@ -16,46 +16,46 @@ import ( "github.com/solo-io/solo-kit/pkg/api/v1/clients/memory" ) -var _ = Describe("TestingEventLoop", func() { +var _ = Describe("KubeconfigsEventLoop", func() { var ( namespace string - emitter TestingEmitter + emitter KubeconfigsEmitter err error ) BeforeEach(func() { - mockResourceClientFactory := &factory.MemoryResourceClientFactory{ + kubeConfigClientFactory := &factory.MemoryResourceClientFactory{ Cache: memory.NewInMemoryResourceCache(), } - mockResourceClient, err := NewMockResourceClient(mockResourceClientFactory) + kubeConfigClient, err := NewKubeConfigClient(kubeConfigClientFactory) Expect(err).NotTo(HaveOccurred()) - emitter = NewTestingEmitter(mockResourceClient) + emitter = NewKubeconfigsEmitter(kubeConfigClient) }) It("runs sync function on a new snapshot", func() { - _, err = emitter.MockResource().Write(NewMockResource(namespace, "jerry"), 
clients.WriteOpts{}) + _, err = emitter.KubeConfig().Write(NewKubeConfig(namespace, "jerry"), clients.WriteOpts{}) Expect(err).NotTo(HaveOccurred()) - sync := &mockTestingSyncer{} - el := NewTestingEventLoop(emitter, sync) + sync := &mockKubeconfigsSyncer{} + el := NewKubeconfigsEventLoop(emitter, sync) _, err := el.Run([]string{namespace}, clients.WatchOpts{}) Expect(err).NotTo(HaveOccurred()) Eventually(sync.Synced, 5*time.Second).Should(BeTrue()) }) }) -type mockTestingSyncer struct { +type mockKubeconfigsSyncer struct { synced bool mutex sync.Mutex } -func (s *mockTestingSyncer) Synced() bool { +func (s *mockKubeconfigsSyncer) Synced() bool { s.mutex.Lock() defer s.mutex.Unlock() return s.synced } -func (s *mockTestingSyncer) Sync(ctx context.Context, snap *TestingSnapshot) error { +func (s *mockKubeconfigsSyncer) Sync(ctx context.Context, snap *KubeconfigsSnapshot) error { s.mutex.Lock() s.synced = true s.mutex.Unlock() diff --git a/test/mocks/v2alpha1/testing_simple_event_loop.sk.go b/pkg/multicluster/group/kubeconfigs_simple_event_loop.sk.go similarity index 64% rename from test/mocks/v2alpha1/testing_simple_event_loop.sk.go rename to pkg/multicluster/group/kubeconfigs_simple_event_loop.sk.go index ba546366e..46cf4ff4e 100644 --- a/test/mocks/v2alpha1/testing_simple_event_loop.sk.go +++ b/pkg/multicluster/group/kubeconfigs_simple_event_loop.sk.go @@ -1,6 +1,6 @@ // Code generated by solo-kit. DO NOT EDIT. 
-package v2alpha1 +package group import ( "context" @@ -19,31 +19,31 @@ import ( // it should be restarted (including having its context cancelled) // based on a diff of the previous and current snapshot -// Deprecated: use TestingSyncDeciderWithContext -type TestingSyncDecider interface { - TestingSyncer - ShouldSync(old, new *TestingSnapshot) bool +// Deprecated: use KubeconfigsSyncDeciderWithContext +type KubeconfigsSyncDecider interface { + KubeconfigsSyncer + ShouldSync(old, new *KubeconfigsSnapshot) bool } -type TestingSyncDeciderWithContext interface { - TestingSyncer - ShouldSync(ctx context.Context, old, new *TestingSnapshot) bool +type KubeconfigsSyncDeciderWithContext interface { + KubeconfigsSyncer + ShouldSync(ctx context.Context, old, new *KubeconfigsSnapshot) bool } -type testingSimpleEventLoop struct { - emitter TestingSimpleEmitter - syncers []TestingSyncer +type kubeconfigsSimpleEventLoop struct { + emitter KubeconfigsSimpleEmitter + syncers []KubeconfigsSyncer } -func NewTestingSimpleEventLoop(emitter TestingSimpleEmitter, syncers ...TestingSyncer) eventloop.SimpleEventLoop { - return &testingSimpleEventLoop{ +func NewKubeconfigsSimpleEventLoop(emitter KubeconfigsSimpleEmitter, syncers ...KubeconfigsSyncer) eventloop.SimpleEventLoop { + return &kubeconfigsSimpleEventLoop{ emitter: emitter, syncers: syncers, } } -func (el *testingSimpleEventLoop) Run(ctx context.Context) (<-chan error, error) { - ctx = contextutils.WithLogger(ctx, "v2alpha1.event_loop") +func (el *kubeconfigsSimpleEventLoop) Run(ctx context.Context) (<-chan error, error) { + ctx = contextutils.WithLogger(ctx, "group.event_loop") logger := contextutils.LoggerFrom(ctx) logger.Infof("event loop started") @@ -54,10 +54,10 @@ func (el *testingSimpleEventLoop) Run(ctx context.Context) (<-chan error, error) return nil, errors.Wrapf(err, "starting snapshot watch") } - go errutils.AggregateErrs(ctx, errs, emitterErrs, "v2alpha1.emitter errors") + go errutils.AggregateErrs(ctx, errs, 
emitterErrs, "group.emitter errors") go func() { // create a new context for each syncer for each loop, cancel each before each loop - syncerCancels := make(map[TestingSyncer]context.CancelFunc) + syncerCancels := make(map[KubeconfigsSyncer]context.CancelFunc) // use closure to allow cancel function to be updated as context changes defer func() { @@ -67,7 +67,7 @@ func (el *testingSimpleEventLoop) Run(ctx context.Context) (<-chan error, error) }() // cache the previous snapshot for comparison - var previousSnapshot *TestingSnapshot + var previousSnapshot *KubeconfigsSnapshot for { select { @@ -79,11 +79,11 @@ func (el *testingSimpleEventLoop) Run(ctx context.Context) (<-chan error, error) // cancel any open watches from previous loop for _, syncer := range el.syncers { // allow the syncer to decide if we should sync it + cancel its previous context - if syncDecider, isDecider := syncer.(TestingSyncDecider); isDecider { + if syncDecider, isDecider := syncer.(KubeconfigsSyncDecider); isDecider { if shouldSync := syncDecider.ShouldSync(previousSnapshot, snapshot); !shouldSync { continue // skip syncing this syncer } - } else if syncDeciderWithContext, isDecider := syncer.(TestingSyncDeciderWithContext); isDecider { + } else if syncDeciderWithContext, isDecider := syncer.(KubeconfigsSyncDeciderWithContext); isDecider { if shouldSync := syncDeciderWithContext.ShouldSync(ctx, previousSnapshot, snapshot); !shouldSync { continue // skip syncing this syncer } @@ -95,7 +95,7 @@ func (el *testingSimpleEventLoop) Run(ctx context.Context) (<-chan error, error) cancel() } - ctx, span := trace.StartSpan(ctx, fmt.Sprintf("testing.solo.io.SimpleEventLoopSync-%T", syncer)) + ctx, span := trace.StartSpan(ctx, fmt.Sprintf("kubeconfigs.multicluster.solo.io.SimpleEventLoopSync-%T", syncer)) ctx, canc := context.WithCancel(ctx) err := syncer.Sync(ctx, snapshot) span.End() diff --git a/pkg/multicluster/group/kubeconfigs_snapshot.sk.go b/pkg/multicluster/group/kubeconfigs_snapshot.sk.go 
new file mode 100644 index 000000000..7ef8d190b --- /dev/null +++ b/pkg/multicluster/group/kubeconfigs_snapshot.sk.go @@ -0,0 +1,60 @@ +// Code generated by solo-kit. DO NOT EDIT. + +package group + +import ( + "fmt" + + "github.com/solo-io/go-utils/hashutils" + "go.uber.org/zap" +) + +type KubeconfigsSnapshot struct { + Kubeconfigs KubeConfigList +} + +func (s KubeconfigsSnapshot) Clone() KubeconfigsSnapshot { + return KubeconfigsSnapshot{ + Kubeconfigs: s.Kubeconfigs.Clone(), + } +} + +func (s KubeconfigsSnapshot) Hash() uint64 { + return hashutils.HashAll( + s.hashKubeconfigs(), + ) +} + +func (s KubeconfigsSnapshot) hashKubeconfigs() uint64 { + return hashutils.HashAll(s.Kubeconfigs.AsInterfaces()...) +} + +func (s KubeconfigsSnapshot) HashFields() []zap.Field { + var fields []zap.Field + fields = append(fields, zap.Uint64("kubeconfigs", s.hashKubeconfigs())) + + return append(fields, zap.Uint64("snapshotHash", s.Hash())) +} + +type KubeconfigsSnapshotStringer struct { + Version uint64 + Kubeconfigs []string +} + +func (ss KubeconfigsSnapshotStringer) String() string { + s := fmt.Sprintf("KubeconfigsSnapshot %v\n", ss.Version) + + s += fmt.Sprintf(" Kubeconfigs %v\n", len(ss.Kubeconfigs)) + for _, name := range ss.Kubeconfigs { + s += fmt.Sprintf(" %v\n", name) + } + + return s +} + +func (s KubeconfigsSnapshot) Stringer() KubeconfigsSnapshotStringer { + return KubeconfigsSnapshotStringer{ + Version: s.Hash(), + Kubeconfigs: s.Kubeconfigs.NamespacesDotNames(), + } +} diff --git a/pkg/multicluster/group/kubeconfigs_snapshot_emitter.sk.go b/pkg/multicluster/group/kubeconfigs_snapshot_emitter.sk.go new file mode 100644 index 000000000..21e331797 --- /dev/null +++ b/pkg/multicluster/group/kubeconfigs_snapshot_emitter.sk.go @@ -0,0 +1,175 @@ +// Code generated by solo-kit. DO NOT EDIT. 
+ +package group + +import ( + "sync" + "time" + + "go.opencensus.io/stats" + "go.opencensus.io/stats/view" + "go.opencensus.io/tag" + + "github.com/solo-io/go-utils/errutils" + "github.com/solo-io/solo-kit/pkg/api/v1/clients" + "github.com/solo-io/solo-kit/pkg/errors" +) + +var ( + mKubeconfigsSnapshotIn = stats.Int64("kubeconfigs.multicluster.solo.io/snap_emitter/snap_in", "The number of snapshots in", "1") + mKubeconfigsSnapshotOut = stats.Int64("kubeconfigs.multicluster.solo.io/snap_emitter/snap_out", "The number of snapshots out", "1") + + kubeconfigssnapshotInView = &view.View{ + Name: "kubeconfigs.multicluster.solo.io_snap_emitter/snap_in", + Measure: mKubeconfigsSnapshotIn, + Description: "The number of snapshots updates coming in", + Aggregation: view.Count(), + TagKeys: []tag.Key{}, + } + kubeconfigssnapshotOutView = &view.View{ + Name: "kubeconfigs.multicluster.solo.io/snap_emitter/snap_out", + Measure: mKubeconfigsSnapshotOut, + Description: "The number of snapshots updates going out", + Aggregation: view.Count(), + TagKeys: []tag.Key{}, + } +) + +func init() { + view.Register(kubeconfigssnapshotInView, kubeconfigssnapshotOutView) +} + +type KubeconfigsEmitter interface { + Register() error + KubeConfig() KubeConfigClient + Snapshots(watchNamespaces []string, opts clients.WatchOpts) (<-chan *KubeconfigsSnapshot, <-chan error, error) +} + +func NewKubeconfigsEmitter(kubeConfigClient KubeConfigClient) KubeconfigsEmitter { + return NewKubeconfigsEmitterWithEmit(kubeConfigClient, make(chan struct{})) +} + +func NewKubeconfigsEmitterWithEmit(kubeConfigClient KubeConfigClient, emit <-chan struct{}) KubeconfigsEmitter { + return &kubeconfigsEmitter{ + kubeConfig: kubeConfigClient, + forceEmit: emit, + } +} + +type kubeconfigsEmitter struct { + forceEmit <-chan struct{} + kubeConfig KubeConfigClient +} + +func (c *kubeconfigsEmitter) Register() error { + if err := c.kubeConfig.Register(); err != nil { + return err + } + return nil +} + +func (c 
*kubeconfigsEmitter) KubeConfig() KubeConfigClient { + return c.kubeConfig +} + +func (c *kubeconfigsEmitter) Snapshots(watchNamespaces []string, opts clients.WatchOpts) (<-chan *KubeconfigsSnapshot, <-chan error, error) { + + if len(watchNamespaces) == 0 { + watchNamespaces = []string{""} + } + + for _, ns := range watchNamespaces { + if ns == "" && len(watchNamespaces) > 1 { + return nil, nil, errors.Errorf("the \"\" namespace is used to watch all namespaces. Snapshots can either be tracked for " + + "specific namespaces or \"\" AllNamespaces, but not both.") + } + } + + errs := make(chan error) + var done sync.WaitGroup + ctx := opts.Ctx + /* Create channel for KubeConfig */ + type kubeConfigListWithNamespace struct { + list KubeConfigList + namespace string + } + kubeConfigChan := make(chan kubeConfigListWithNamespace) + + for _, namespace := range watchNamespaces { + /* Setup namespaced watch for KubeConfig */ + kubeConfigNamespacesChan, kubeConfigErrs, err := c.kubeConfig.Watch(namespace, opts) + if err != nil { + return nil, nil, errors.Wrapf(err, "starting KubeConfig watch") + } + + done.Add(1) + go func(namespace string) { + defer done.Done() + errutils.AggregateErrs(ctx, errs, kubeConfigErrs, namespace+"-kubeconfigs") + }(namespace) + + /* Watch for changes and update snapshot */ + go func(namespace string) { + for { + select { + case <-ctx.Done(): + return + case kubeConfigList := <-kubeConfigNamespacesChan: + select { + case <-ctx.Done(): + return + case kubeConfigChan <- kubeConfigListWithNamespace{list: kubeConfigList, namespace: namespace}: + } + } + } + }(namespace) + } + + snapshots := make(chan *KubeconfigsSnapshot) + go func() { + originalSnapshot := KubeconfigsSnapshot{} + currentSnapshot := originalSnapshot.Clone() + timer := time.NewTicker(time.Second * 1) + sync := func() { + if originalSnapshot.Hash() == currentSnapshot.Hash() { + return + } + + stats.Record(ctx, mKubeconfigsSnapshotOut.M(1)) + originalSnapshot = currentSnapshot.Clone() + 
sentSnapshot := currentSnapshot.Clone() + snapshots <- &sentSnapshot + } + kubeconfigsByNamespace := make(map[string]KubeConfigList) + + for { + record := func() { stats.Record(ctx, mKubeconfigsSnapshotIn.M(1)) } + + select { + case <-timer.C: + sync() + case <-ctx.Done(): + close(snapshots) + done.Wait() + close(errs) + return + case <-c.forceEmit: + sentSnapshot := currentSnapshot.Clone() + snapshots <- &sentSnapshot + case kubeConfigNamespacedList := <-kubeConfigChan: + record() + + namespace := kubeConfigNamespacedList.namespace + + // merge lists by namespace + kubeconfigsByNamespace[namespace] = kubeConfigNamespacedList.list + var kubeConfigList KubeConfigList + for _, kubeconfigs := range kubeconfigsByNamespace { + kubeConfigList = append(kubeConfigList, kubeconfigs...) + } + currentSnapshot.Kubeconfigs = kubeConfigList.Sort() + } + } + }() + return snapshots, errs, nil +} diff --git a/pkg/multicluster/group/kubeconfigs_snapshot_emitter_test.go b/pkg/multicluster/group/kubeconfigs_snapshot_emitter_test.go new file mode 100644 index 000000000..a28c005d4 --- /dev/null +++ b/pkg/multicluster/group/kubeconfigs_snapshot_emitter_test.go @@ -0,0 +1,202 @@ +// Code generated by solo-kit. DO NOT EDIT. + +// +build solokit + +package group + +import ( + "context" + "os" + "time" + + . "github.com/onsi/ginkgo" + . 
"github.com/onsi/gomega" + "github.com/solo-io/go-utils/kubeutils" + "github.com/solo-io/go-utils/log" + "github.com/solo-io/solo-kit/pkg/api/v1/clients" + "github.com/solo-io/solo-kit/pkg/api/v1/clients/factory" + "github.com/solo-io/solo-kit/pkg/api/v1/clients/memory" + "github.com/solo-io/solo-kit/test/helpers" + "k8s.io/client-go/kubernetes" + + // Needed to run tests in GKE + _ "k8s.io/client-go/plugin/pkg/client/auth/gcp" + + // From https://github.com/kubernetes/client-go/blob/53c7adfd0294caa142d961e1f780f74081d5b15f/examples/out-of-cluster-client-configuration/main.go#L31 + _ "k8s.io/client-go/plugin/pkg/client/auth/gcp" +) + +var _ = Describe("GroupEmitter", func() { + if os.Getenv("RUN_KUBE_TESTS") != "1" { + log.Printf("This test creates kubernetes resources and is disabled by default. To enable, set RUN_KUBE_TESTS=1 in your env.") + return + } + var ( + namespace1 string + namespace2 string + name1, name2 = "angela" + helpers.RandString(3), "bob" + helpers.RandString(3) + kube kubernetes.Interface + emitter KubeconfigsEmitter + kubeConfigClient KubeConfigClient + ) + + BeforeEach(func() { + namespace1 = helpers.RandString(8) + namespace2 = helpers.RandString(8) + kube = helpers.MustKubeClient() + err := kubeutils.CreateNamespacesInParallel(kube, namespace1, namespace2) + Expect(err).NotTo(HaveOccurred()) + // KubeConfig Constructor + kubeConfigClientFactory := &factory.MemoryResourceClientFactory{ + Cache: memory.NewInMemoryResourceCache(), + } + + kubeConfigClient, err = NewKubeConfigClient(kubeConfigClientFactory) + Expect(err).NotTo(HaveOccurred()) + emitter = NewKubeconfigsEmitter(kubeConfigClient) + }) + AfterEach(func() { + err := kubeutils.DeleteNamespacesInParallelBlocking(kube, namespace1, namespace2) + Expect(err).NotTo(HaveOccurred()) + }) + It("tracks snapshots on changes to any resource", func() { + ctx := context.Background() + err := emitter.Register() + Expect(err).NotTo(HaveOccurred()) + + snapshots, errs, err := 
emitter.Snapshots([]string{namespace1, namespace2}, clients.WatchOpts{ + Ctx: ctx, + RefreshRate: time.Second, + }) + Expect(err).NotTo(HaveOccurred()) + + var snap *KubeconfigsSnapshot + + /* + KubeConfig + */ + + assertSnapshotkubeconfigs := func(expectkubeconfigs KubeConfigList, unexpectkubeconfigs KubeConfigList) { + drain: + for { + select { + case snap = <-snapshots: + for _, expected := range expectkubeconfigs { + if _, err := snap.Kubeconfigs.Find(expected.GetMetadata().Ref().Strings()); err != nil { + continue drain + } + } + for _, unexpected := range unexpectkubeconfigs { + if _, err := snap.Kubeconfigs.Find(unexpected.GetMetadata().Ref().Strings()); err == nil { + continue drain + } + } + break drain + case err := <-errs: + Expect(err).NotTo(HaveOccurred()) + case <-time.After(time.Second * 10): + nsList1, _ := kubeConfigClient.List(namespace1, clients.ListOpts{}) + nsList2, _ := kubeConfigClient.List(namespace2, clients.ListOpts{}) + combined := append(nsList1, nsList2...) + Fail("expected final snapshot before 10 seconds. 
expected " + log.Sprintf("%v", combined)) + } + } + } + kubeConfig1a, err := kubeConfigClient.Write(NewKubeConfig(namespace1, name1), clients.WriteOpts{Ctx: ctx}) + Expect(err).NotTo(HaveOccurred()) + kubeConfig1b, err := kubeConfigClient.Write(NewKubeConfig(namespace2, name1), clients.WriteOpts{Ctx: ctx}) + Expect(err).NotTo(HaveOccurred()) + + assertSnapshotkubeconfigs(KubeConfigList{kubeConfig1a, kubeConfig1b}, nil) + kubeConfig2a, err := kubeConfigClient.Write(NewKubeConfig(namespace1, name2), clients.WriteOpts{Ctx: ctx}) + Expect(err).NotTo(HaveOccurred()) + kubeConfig2b, err := kubeConfigClient.Write(NewKubeConfig(namespace2, name2), clients.WriteOpts{Ctx: ctx}) + Expect(err).NotTo(HaveOccurred()) + + assertSnapshotkubeconfigs(KubeConfigList{kubeConfig1a, kubeConfig1b, kubeConfig2a, kubeConfig2b}, nil) + + err = kubeConfigClient.Delete(kubeConfig2a.GetMetadata().Namespace, kubeConfig2a.GetMetadata().Name, clients.DeleteOpts{Ctx: ctx}) + Expect(err).NotTo(HaveOccurred()) + err = kubeConfigClient.Delete(kubeConfig2b.GetMetadata().Namespace, kubeConfig2b.GetMetadata().Name, clients.DeleteOpts{Ctx: ctx}) + Expect(err).NotTo(HaveOccurred()) + + assertSnapshotkubeconfigs(KubeConfigList{kubeConfig1a, kubeConfig1b}, KubeConfigList{kubeConfig2a, kubeConfig2b}) + + err = kubeConfigClient.Delete(kubeConfig1a.GetMetadata().Namespace, kubeConfig1a.GetMetadata().Name, clients.DeleteOpts{Ctx: ctx}) + Expect(err).NotTo(HaveOccurred()) + err = kubeConfigClient.Delete(kubeConfig1b.GetMetadata().Namespace, kubeConfig1b.GetMetadata().Name, clients.DeleteOpts{Ctx: ctx}) + Expect(err).NotTo(HaveOccurred()) + + assertSnapshotkubeconfigs(nil, KubeConfigList{kubeConfig1a, kubeConfig1b, kubeConfig2a, kubeConfig2b}) + }) + It("tracks snapshots on changes to any resource using AllNamespace", func() { + ctx := context.Background() + err := emitter.Register() + Expect(err).NotTo(HaveOccurred()) + + snapshots, errs, err := emitter.Snapshots([]string{""}, clients.WatchOpts{ + Ctx: ctx, + 
RefreshRate: time.Second, + }) + Expect(err).NotTo(HaveOccurred()) + + var snap *KubeconfigsSnapshot + + /* + KubeConfig + */ + + assertSnapshotkubeconfigs := func(expectkubeconfigs KubeConfigList, unexpectkubeconfigs KubeConfigList) { + drain: + for { + select { + case snap = <-snapshots: + for _, expected := range expectkubeconfigs { + if _, err := snap.Kubeconfigs.Find(expected.GetMetadata().Ref().Strings()); err != nil { + continue drain + } + } + for _, unexpected := range unexpectkubeconfigs { + if _, err := snap.Kubeconfigs.Find(unexpected.GetMetadata().Ref().Strings()); err == nil { + continue drain + } + } + break drain + case err := <-errs: + Expect(err).NotTo(HaveOccurred()) + case <-time.After(time.Second * 10): + nsList1, _ := kubeConfigClient.List(namespace1, clients.ListOpts{}) + nsList2, _ := kubeConfigClient.List(namespace2, clients.ListOpts{}) + combined := append(nsList1, nsList2...) + Fail("expected final snapshot before 10 seconds. expected " + log.Sprintf("%v", combined)) + } + } + } + kubeConfig1a, err := kubeConfigClient.Write(NewKubeConfig(namespace1, name1), clients.WriteOpts{Ctx: ctx}) + Expect(err).NotTo(HaveOccurred()) + kubeConfig1b, err := kubeConfigClient.Write(NewKubeConfig(namespace2, name1), clients.WriteOpts{Ctx: ctx}) + Expect(err).NotTo(HaveOccurred()) + + assertSnapshotkubeconfigs(KubeConfigList{kubeConfig1a, kubeConfig1b}, nil) + kubeConfig2a, err := kubeConfigClient.Write(NewKubeConfig(namespace1, name2), clients.WriteOpts{Ctx: ctx}) + Expect(err).NotTo(HaveOccurred()) + kubeConfig2b, err := kubeConfigClient.Write(NewKubeConfig(namespace2, name2), clients.WriteOpts{Ctx: ctx}) + Expect(err).NotTo(HaveOccurred()) + + assertSnapshotkubeconfigs(KubeConfigList{kubeConfig1a, kubeConfig1b, kubeConfig2a, kubeConfig2b}, nil) + + err = kubeConfigClient.Delete(kubeConfig2a.GetMetadata().Namespace, kubeConfig2a.GetMetadata().Name, clients.DeleteOpts{Ctx: ctx}) + Expect(err).NotTo(HaveOccurred()) + err = 
kubeConfigClient.Delete(kubeConfig2b.GetMetadata().Namespace, kubeConfig2b.GetMetadata().Name, clients.DeleteOpts{Ctx: ctx}) + Expect(err).NotTo(HaveOccurred()) + + assertSnapshotkubeconfigs(KubeConfigList{kubeConfig1a, kubeConfig1b}, KubeConfigList{kubeConfig2a, kubeConfig2b}) + + err = kubeConfigClient.Delete(kubeConfig1a.GetMetadata().Namespace, kubeConfig1a.GetMetadata().Name, clients.DeleteOpts{Ctx: ctx}) + Expect(err).NotTo(HaveOccurred()) + err = kubeConfigClient.Delete(kubeConfig1b.GetMetadata().Namespace, kubeConfig1b.GetMetadata().Name, clients.DeleteOpts{Ctx: ctx}) + Expect(err).NotTo(HaveOccurred()) + + assertSnapshotkubeconfigs(nil, KubeConfigList{kubeConfig1a, kubeConfig1b, kubeConfig2a, kubeConfig2b}) + }) +}) diff --git a/test/mocks/v1alpha1/testing_snapshot_simple_emitter.sk.go b/pkg/multicluster/group/kubeconfigs_snapshot_simple_emitter.sk.go similarity index 54% rename from test/mocks/v1alpha1/testing_snapshot_simple_emitter.sk.go rename to pkg/multicluster/group/kubeconfigs_snapshot_simple_emitter.sk.go index 52fc11597..2905a88b6 100644 --- a/test/mocks/v1alpha1/testing_snapshot_simple_emitter.sk.go +++ b/pkg/multicluster/group/kubeconfigs_snapshot_simple_emitter.sk.go @@ -1,10 +1,10 @@ // Code generated by solo-kit. DO NOT EDIT. 
-package v1alpha1 +package group import ( "context" - fmt "fmt" + "fmt" "time" "go.opencensus.io/stats" @@ -13,28 +13,28 @@ import ( "github.com/solo-io/solo-kit/pkg/api/v1/clients" ) -type TestingSimpleEmitter interface { - Snapshots(ctx context.Context) (<-chan *TestingSnapshot, <-chan error, error) +type KubeconfigsSimpleEmitter interface { + Snapshots(ctx context.Context) (<-chan *KubeconfigsSnapshot, <-chan error, error) } -func NewTestingSimpleEmitter(aggregatedWatch clients.ResourceWatch) TestingSimpleEmitter { - return NewTestingSimpleEmitterWithEmit(aggregatedWatch, make(chan struct{})) +func NewKubeconfigsSimpleEmitter(aggregatedWatch clients.ResourceWatch) KubeconfigsSimpleEmitter { + return NewKubeconfigsSimpleEmitterWithEmit(aggregatedWatch, make(chan struct{})) } -func NewTestingSimpleEmitterWithEmit(aggregatedWatch clients.ResourceWatch, emit <-chan struct{}) TestingSimpleEmitter { - return &testingSimpleEmitter{ +func NewKubeconfigsSimpleEmitterWithEmit(aggregatedWatch clients.ResourceWatch, emit <-chan struct{}) KubeconfigsSimpleEmitter { + return &kubeconfigsSimpleEmitter{ aggregatedWatch: aggregatedWatch, forceEmit: emit, } } -type testingSimpleEmitter struct { +type kubeconfigsSimpleEmitter struct { forceEmit <-chan struct{} aggregatedWatch clients.ResourceWatch } -func (c *testingSimpleEmitter) Snapshots(ctx context.Context) (<-chan *TestingSnapshot, <-chan error, error) { - snapshots := make(chan *TestingSnapshot) +func (c *kubeconfigsSimpleEmitter) Snapshots(ctx context.Context) (<-chan *KubeconfigsSnapshot, <-chan error, error) { + snapshots := make(chan *KubeconfigsSnapshot) errs := make(chan error) untyped, watchErrs, err := c.aggregatedWatch(ctx) @@ -42,10 +42,10 @@ func (c *testingSimpleEmitter) Snapshots(ctx context.Context) (<-chan *TestingSn return nil, nil, err } - go errutils.AggregateErrs(ctx, errs, watchErrs, "testing-emitter") + go errutils.AggregateErrs(ctx, errs, watchErrs, "kubeconfigs-emitter") go func() { - originalSnapshot 
:= TestingSnapshot{} + originalSnapshot := KubeconfigsSnapshot{} currentSnapshot := originalSnapshot.Clone() timer := time.NewTicker(time.Second * 1) sync := func() { @@ -53,7 +53,7 @@ func (c *testingSimpleEmitter) Snapshots(ctx context.Context) (<-chan *TestingSn return } - stats.Record(ctx, mTestingSnapshotOut.M(1)) + stats.Record(ctx, mKubeconfigsSnapshotOut.M(1)) originalSnapshot = currentSnapshot.Clone() sentSnapshot := currentSnapshot.Clone() snapshots <- &sentSnapshot @@ -65,7 +65,7 @@ func (c *testingSimpleEmitter) Snapshots(ctx context.Context) (<-chan *TestingSn }() for { - record := func() { stats.Record(ctx, mTestingSnapshotIn.M(1)) } + record := func() { stats.Record(ctx, mKubeconfigsSnapshotIn.M(1)) } select { case <-timer.C: @@ -78,14 +78,14 @@ func (c *testingSimpleEmitter) Snapshots(ctx context.Context) (<-chan *TestingSn case untypedList := <-untyped: record() - currentSnapshot = TestingSnapshot{} + currentSnapshot = KubeconfigsSnapshot{} for _, res := range untypedList { switch typed := res.(type) { - case *MockResource: - currentSnapshot.Mocks = append(currentSnapshot.Mocks, typed) + case *KubeConfig: + currentSnapshot.Kubeconfigs = append(currentSnapshot.Kubeconfigs, typed) default: select { - case errs <- fmt.Errorf("TestingSnapshotEmitter "+ + case errs <- fmt.Errorf("KubeconfigsSnapshotEmitter "+ "cannot process resource %v of type %T", res.GetMetadata().Ref(), res): case <-ctx.Done(): return diff --git a/pkg/multicluster/v1/kubeconfigs_event_loop.sk.go b/pkg/multicluster/v1/kubeconfigs_event_loop.sk.go index 3fe79e1a9..994b3229d 100644 --- a/pkg/multicluster/v1/kubeconfigs_event_loop.sk.go +++ b/pkg/multicluster/v1/kubeconfigs_event_loop.sk.go @@ -1,6 +1,6 @@ // Code generated by solo-kit. DO NOT EDIT. 
-package v1 +package group import ( "context" @@ -46,7 +46,7 @@ func NewKubeconfigsEventLoop(emitter KubeconfigsEmitter, syncer KubeconfigsSynce func (el *kubeconfigsEventLoop) Run(namespaces []string, opts clients.WatchOpts) (<-chan error, error) { opts = opts.WithDefaults() - opts.Ctx = contextutils.WithLogger(opts.Ctx, "v1.event_loop") + opts.Ctx = contextutils.WithLogger(opts.Ctx, "group.event_loop") logger := contextutils.LoggerFrom(opts.Ctx) logger.Infof("event loop started") @@ -56,7 +56,7 @@ func (el *kubeconfigsEventLoop) Run(namespaces []string, opts clients.WatchOpts) if err != nil { return nil, errors.Wrapf(err, "starting snapshot watch") } - go errutils.AggregateErrs(opts.Ctx, errs, emitterErrs, "v1.emitter errors") + go errutils.AggregateErrs(opts.Ctx, errs, emitterErrs, "group.emitter errors") go func() { // create a new context for each loop, cancel it before each loop var cancel context.CancelFunc = func() {} diff --git a/pkg/multicluster/v1/kubeconfigs_event_loop_test.go b/pkg/multicluster/v1/kubeconfigs_event_loop_test.go index 5e0573c93..1cc4f430b 100644 --- a/pkg/multicluster/v1/kubeconfigs_event_loop_test.go +++ b/pkg/multicluster/v1/kubeconfigs_event_loop_test.go @@ -2,7 +2,7 @@ // +build solokit -package v1 +package group import ( "context" diff --git a/pkg/multicluster/v1/kubeconfigs_simple_event_loop.sk.go b/pkg/multicluster/v1/kubeconfigs_simple_event_loop.sk.go index 658724dd9..46cf4ff4e 100644 --- a/pkg/multicluster/v1/kubeconfigs_simple_event_loop.sk.go +++ b/pkg/multicluster/v1/kubeconfigs_simple_event_loop.sk.go @@ -1,6 +1,6 @@ // Code generated by solo-kit. DO NOT EDIT. -package v1 +package group import ( "context" @@ -43,7 +43,7 @@ func NewKubeconfigsSimpleEventLoop(emitter KubeconfigsSimpleEmitter, syncers ... 
} func (el *kubeconfigsSimpleEventLoop) Run(ctx context.Context) (<-chan error, error) { - ctx = contextutils.WithLogger(ctx, "v1.event_loop") + ctx = contextutils.WithLogger(ctx, "group.event_loop") logger := contextutils.LoggerFrom(ctx) logger.Infof("event loop started") @@ -54,7 +54,7 @@ func (el *kubeconfigsSimpleEventLoop) Run(ctx context.Context) (<-chan error, er return nil, errors.Wrapf(err, "starting snapshot watch") } - go errutils.AggregateErrs(ctx, errs, emitterErrs, "v1.emitter errors") + go errutils.AggregateErrs(ctx, errs, emitterErrs, "group.emitter errors") go func() { // create a new context for each syncer for each loop, cancel each before each loop syncerCancels := make(map[KubeconfigsSyncer]context.CancelFunc) diff --git a/pkg/multicluster/v1/kubeconfigs_snapshot.sk.go b/pkg/multicluster/v1/kubeconfigs_snapshot.sk.go index 46ad8201c..7ef8d190b 100644 --- a/pkg/multicluster/v1/kubeconfigs_snapshot.sk.go +++ b/pkg/multicluster/v1/kubeconfigs_snapshot.sk.go @@ -1,6 +1,6 @@ // Code generated by solo-kit. DO NOT EDIT. -package v1 +package group import ( "fmt" diff --git a/pkg/multicluster/v1/kubeconfigs_snapshot_emitter.sk.go b/pkg/multicluster/v1/kubeconfigs_snapshot_emitter.sk.go index dbd6f2ad5..21e331797 100644 --- a/pkg/multicluster/v1/kubeconfigs_snapshot_emitter.sk.go +++ b/pkg/multicluster/v1/kubeconfigs_snapshot_emitter.sk.go @@ -1,6 +1,6 @@ // Code generated by solo-kit. DO NOT EDIT. 
-package v1 +package group import ( "sync" diff --git a/pkg/multicluster/v1/kubeconfigs_snapshot_emitter_test.go b/pkg/multicluster/v1/kubeconfigs_snapshot_emitter_test.go index aa576b20c..a28c005d4 100644 --- a/pkg/multicluster/v1/kubeconfigs_snapshot_emitter_test.go +++ b/pkg/multicluster/v1/kubeconfigs_snapshot_emitter_test.go @@ -2,7 +2,7 @@ // +build solokit -package v1 +package group import ( "context" @@ -26,7 +26,7 @@ import ( _ "k8s.io/client-go/plugin/pkg/client/auth/gcp" ) -var _ = Describe("V1Emitter", func() { +var _ = Describe("GroupEmitter", func() { if os.Getenv("RUN_KUBE_TESTS") != "1" { log.Printf("This test creates kubernetes resources and is disabled by default. To enable, set RUN_KUBE_TESTS=1 in your env.") return diff --git a/pkg/multicluster/v1/kubeconfigs_snapshot_simple_emitter.sk.go b/pkg/multicluster/v1/kubeconfigs_snapshot_simple_emitter.sk.go index 4d9518ea2..2905a88b6 100644 --- a/pkg/multicluster/v1/kubeconfigs_snapshot_simple_emitter.sk.go +++ b/pkg/multicluster/v1/kubeconfigs_snapshot_simple_emitter.sk.go @@ -1,6 +1,6 @@ // Code generated by solo-kit. DO NOT EDIT. 
-package v1 +package group import ( "context" diff --git a/pkg/multicluster/v1/multicluster.solo.io_suite_test.go b/pkg/multicluster/v1/multicluster.solo.io_suite_test.go index cfb6e05a4..a4d66f02d 100644 --- a/pkg/multicluster/v1/multicluster.solo.io_suite_test.go +++ b/pkg/multicluster/v1/multicluster.solo.io_suite_test.go @@ -36,13 +36,13 @@ var ( Expect(err).NotTo(HaveOccurred()) clientset, err := apiexts.NewForConfig(cfg) Expect(err).NotTo(HaveOccurred()) - err = clientset.ApiextensionsV1beta1().CustomResourceDefinitions().Delete("anothermockresources.testing.solo.io", &metav1.DeleteOptions{}) - testutils.ErrorNotOccuredOrNotFound(err) - err = clientset.ApiextensionsV1beta1().CustomResourceDefinitions().Delete("clusterresources.testing.solo.io", &metav1.DeleteOptions{}) + err = clientset.ApiextensionsV1beta1().CustomResourceDefinitions().Delete("mocks.testing.solo.io", &metav1.DeleteOptions{}) testutils.ErrorNotOccuredOrNotFound(err) err = clientset.ApiextensionsV1beta1().CustomResourceDefinitions().Delete("fakes.testing.solo.io", &metav1.DeleteOptions{}) testutils.ErrorNotOccuredOrNotFound(err) - err = clientset.ApiextensionsV1beta1().CustomResourceDefinitions().Delete("mocks.testing.solo.io", &metav1.DeleteOptions{}) + err = clientset.ApiextensionsV1beta1().CustomResourceDefinitions().Delete("anothermockresources.testing.solo.io", &metav1.DeleteOptions{}) + testutils.ErrorNotOccuredOrNotFound(err) + err = clientset.ApiextensionsV1beta1().CustomResourceDefinitions().Delete("clusterresources.testing.solo.io", &metav1.DeleteOptions{}) testutils.ErrorNotOccuredOrNotFound(err) Expect(lock.ReleaseLock()).NotTo(HaveOccurred()) }) diff --git a/test/mocks/api/solo-kit.json b/test/mocks/api/solo-kit.json new file mode 100644 index 000000000..c8f4b8ed9 --- /dev/null +++ b/test/mocks/api/solo-kit.json @@ -0,0 +1,63 @@ +{ + "title": "Solo-Kit Testing", + "description": "mock solo-kit project", + "api_groups": [ + { + "name": "testing.solo.io", + "docs_dir": 
"test/mocks/docs", + "conversion_go_package": "github.com/solo-io/solo-kit/test/mocks/conversion", + "resource_group_go_package": "github.com/solo-io/solo-kit/test/mocks/group", + "imports": [ + "github.com/solo-io/solo-kit/api/external/kubernetes" + ], + "resource_groups": { + "testing.solo.io": [ + { + "name": "MockResource", + "package": "testing.solo.io" + }, + { + "name": "FakeResource", + "package": "testing.solo.io" + }, + { + "name": "AnotherMockResource", + "package": "testing.solo.io" + }, + { + "name": "ClusterResource", + "package": "testing.solo.io" + }, + { + "name": "MockCustomType", + "package": "github.com/solo-io/solo-kit/test/mocks/api/v1/customtype" + }, + { + "name": "Pod", + "package": "github.com/solo-io/solo-kit/pkg/api/v1/resources/common/kubernetes" + } + ] + }, + "version_configs": [ + { + "version": "v2alpha1" + }, + { + "version": "v1alpha1", + "crd_group_override": "crds.testing.solo.io" + }, + { + "version": "v1", + "custom_resources": [ + { + "package": "github.com/solo-io/solo-kit/test/mocks/api/v1/customtype", + "type": "MockCustomType", + "plural_name": "mcts", + "short_name": "mct" + } + ] + } + ] + } + ] +} \ No newline at end of file diff --git a/test/mocks/api/v1/mock_resources.proto b/test/mocks/api/v1/mock_resources.proto index 575def583..f4e7de8b1 100644 --- a/test/mocks/api/v1/mock_resources.proto +++ b/test/mocks/api/v1/mock_resources.proto @@ -63,6 +63,7 @@ message FakeResource { uint32 count = 1; core.solo.io.Metadata metadata = 7 [(gogoproto.nullable) = false]; + core.solo.io.Status status = 6 [(gogoproto.nullable) = false]; } /* diff --git a/test/mocks/api/v1/solo-kit.json b/test/mocks/api/v1/solo-kit.json deleted file mode 100644 index 801367bc9..000000000 --- a/test/mocks/api/v1/solo-kit.json +++ /dev/null @@ -1,45 +0,0 @@ -{ - "title": "Solo-Kit Testing", - "name": "testing.solo.io", - "version": "v1", - "docs_dir": "../doc", - "imports": [ - "github.com/solo-io/solo-kit/api/external/kubernetes" - ], - 
"custom_resources": [ - { - "package": "github.com/solo-io/solo-kit/test/mocks/api/v1/customtype", - "type": "MockCustomType", - "plural_name": "mcts", - "short_name": "mct" - } - ], - "resource_groups": { - "testing.solo.io": [ - { - "name": "MockResource", - "package": "testing.solo.io" - }, - { - "name": "FakeResource", - "package": "testing.solo.io" - }, - { - "name": "AnotherMockResource", - "package": "testing.solo.io" - }, - { - "name": "ClusterResource", - "package": "testing.solo.io" - }, - { - "name": "MockCustomType", - "package": "github.com/solo-io/solo-kit/test/mocks/api/v1/customtype" - }, - { - "name": "Pod", - "package": "github.com/solo-io/solo-kit/pkg/api/v1/resources/common/kubernetes" - } - ] - } -} \ No newline at end of file diff --git a/test/mocks/api/v1alpha1/solo-kit.json b/test/mocks/api/v1alpha1/solo-kit.json deleted file mode 100644 index c90ef0c26..000000000 --- a/test/mocks/api/v1alpha1/solo-kit.json +++ /dev/null @@ -1,15 +0,0 @@ -{ - "title": "Solo-Kit Testing", - "name": "testing.solo.io", - "version": "v1alpha1", - "docs_dir": "../doc", - "crd_group_override": "crds.testing.solo.io", - "resource_groups": { - "testing.solo.io": [ - { - "name": "MockResource", - "package": "testing.solo.io" - } - ] - } -} \ No newline at end of file diff --git a/test/mocks/api/v2alpha1/solo-kit.json b/test/mocks/api/v2alpha1/solo-kit.json deleted file mode 100644 index fd99b3e8a..000000000 --- a/test/mocks/api/v2alpha1/solo-kit.json +++ /dev/null @@ -1,19 +0,0 @@ -{ - "title": "Solo-Kit Testing v2alpha1", - "name": "testing.solo.io", - "version": "v2alpha1", - "resource_groups": { - "testing.solo.io": [ - { - "name": "MockResource", - "package": "testing.solo.io", - "version": "v2alpha1" - }, - { - "name": "FakeResource", - "package": "testing.solo.io", - "version": "v1" - } - ] - } -} \ No newline at end of file diff --git a/test/mocks/conversion/conversion_suite_test.go b/test/mocks/conversion/conversion_suite_test.go new file mode 100644 index 
000000000..6dafe4cbe --- /dev/null +++ b/test/mocks/conversion/conversion_suite_test.go @@ -0,0 +1,15 @@ +// Code generated by solo-kit. DO NOT EDIT. + +package conversion_test + +import ( + "testing" + + . "github.com/onsi/ginkgo" + . "github.com/onsi/gomega" +) + +func TestConversion(t *testing.T) { + RegisterFailHandler(Fail) + RunSpecs(t, "Conversion Suite") +} diff --git a/test/mocks/conversion/resource_converter.sk.go b/test/mocks/conversion/resource_converter.sk.go new file mode 100644 index 000000000..1742faff5 --- /dev/null +++ b/test/mocks/conversion/resource_converter.sk.go @@ -0,0 +1,143 @@ +// Code generated by solo-kit. DO NOT EDIT. + +package conversion + +import ( + "errors" + + "github.com/solo-io/go-utils/versionutils/kubeapi" + "github.com/solo-io/solo-kit/pkg/api/v1/clients/kube/crd" + v1 "github.com/solo-io/solo-kit/test/mocks/v1" + v1alpha1 "github.com/solo-io/solo-kit/test/mocks/v1alpha1" + v2alpha1 "github.com/solo-io/solo-kit/test/mocks/v2alpha1" +) + +type FakeResourceUpConverter interface { + FromV1Alpha1ToV1(src *v1alpha1.FakeResource) *v1.FakeResource +} + +type FakeResourceDownConverter interface { + FromV1ToV1Alpha1(src *v1.FakeResource) *v1alpha1.FakeResource +} + +type fakeResourceConverter struct { + upConverter FakeResourceUpConverter + downConverter FakeResourceDownConverter +} + +func NewFakeResourceConverter(u FakeResourceUpConverter, d FakeResourceDownConverter) crd.Converter { + return &fakeResourceConverter{ + upConverter: u, + downConverter: d, + } +} + +func (c *fakeResourceConverter) Convert(src, dst crd.SoloKitCrd) error { + srcVersion, err := kubeapi.ParseVersion(src.GetObjectKind().GroupVersionKind().Version) + if err != nil { + return err + } + dstVersion, err := kubeapi.ParseVersion(dst.GetObjectKind().GroupVersionKind().Version) + if err != nil { + return err + } + + if srcVersion.GreaterThan(dstVersion) { + return c.convertDown(src, dst) + } else if srcVersion.LessThan(dstVersion) { + return c.convertUp(src, dst) + 
} + return crd.Copy(src, dst) +} + +func (c *fakeResourceConverter) convertUp(src, dst crd.SoloKitCrd) error { + if src.GetObjectKind().GroupVersionKind().Version == dst.GetObjectKind().GroupVersionKind().Version { + return crd.Copy(src, dst) + } + + switch t := src.(type) { + case *v1alpha1.FakeResource: + return c.convertUp(c.upConverter.FromV1Alpha1ToV1(t), dst) + } + return errors.New("unrecognized source type, this should never happen") +} + +func (c *fakeResourceConverter) convertDown(src, dst crd.SoloKitCrd) error { + if src.GetObjectKind().GroupVersionKind().Version == dst.GetObjectKind().GroupVersionKind().Version { + return crd.Copy(src, dst) + } + + switch t := src.(type) { + case *v1.FakeResource: + return c.convertDown(c.downConverter.FromV1ToV1Alpha1(t), dst) + } + return errors.New("unrecognized source type, this should never happen") +} + +type MockResourceUpConverter interface { + FromV1Alpha1ToV1(src *v1alpha1.MockResource) *v1.MockResource + FromV1ToV2Alpha1(src *v1.MockResource) *v2alpha1.MockResource +} + +type MockResourceDownConverter interface { + FromV1ToV1Alpha1(src *v1.MockResource) *v1alpha1.MockResource + FromV2Alpha1ToV1(src *v2alpha1.MockResource) *v1.MockResource +} + +type mockResourceConverter struct { + upConverter MockResourceUpConverter + downConverter MockResourceDownConverter +} + +func NewMockResourceConverter(u MockResourceUpConverter, d MockResourceDownConverter) crd.Converter { + return &mockResourceConverter{ + upConverter: u, + downConverter: d, + } +} + +func (c *mockResourceConverter) Convert(src, dst crd.SoloKitCrd) error { + srcVersion, err := kubeapi.ParseVersion(src.GetObjectKind().GroupVersionKind().Version) + if err != nil { + return err + } + dstVersion, err := kubeapi.ParseVersion(dst.GetObjectKind().GroupVersionKind().Version) + if err != nil { + return err + } + + if srcVersion.GreaterThan(dstVersion) { + return c.convertDown(src, dst) + } else if srcVersion.LessThan(dstVersion) { + return c.convertUp(src, 
dst) + } + return crd.Copy(src, dst) +} + +func (c *mockResourceConverter) convertUp(src, dst crd.SoloKitCrd) error { + if src.GetObjectKind().GroupVersionKind().Version == dst.GetObjectKind().GroupVersionKind().Version { + return crd.Copy(src, dst) + } + + switch t := src.(type) { + case *v1alpha1.MockResource: + return c.convertUp(c.upConverter.FromV1Alpha1ToV1(t), dst) + case *v1.MockResource: + return c.convertUp(c.upConverter.FromV1ToV2Alpha1(t), dst) + } + return errors.New("unrecognized source type, this should never happen") +} + +func (c *mockResourceConverter) convertDown(src, dst crd.SoloKitCrd) error { + if src.GetObjectKind().GroupVersionKind().Version == dst.GetObjectKind().GroupVersionKind().Version { + return crd.Copy(src, dst) + } + + switch t := src.(type) { + case *v1.MockResource: + return c.convertDown(c.downConverter.FromV1ToV1Alpha1(t), dst) + case *v2alpha1.MockResource: + return c.convertDown(c.downConverter.FromV2Alpha1ToV1(t), dst) + } + return errors.New("unrecognized source type, this should never happen") +} diff --git a/test/mocks/conversion/resource_converter_test.go b/test/mocks/conversion/resource_converter_test.go new file mode 100644 index 000000000..b73e96453 --- /dev/null +++ b/test/mocks/conversion/resource_converter_test.go @@ -0,0 +1,110 @@ +// Code generated by solo-kit. DO NOT EDIT. + +package conversion_test + +import ( + . "github.com/onsi/ginkgo" + . 
"github.com/onsi/gomega" + "github.com/solo-io/solo-kit/pkg/api/v1/clients/kube/crd" + "github.com/solo-io/solo-kit/pkg/api/v1/resources/core" + "github.com/solo-io/solo-kit/test/mocks/conversion" + v1 "github.com/solo-io/solo-kit/test/mocks/v1" + v1alpha1 "github.com/solo-io/solo-kit/test/mocks/v1alpha1" + v2alpha1 "github.com/solo-io/solo-kit/test/mocks/v2alpha1" +) + +var converter crd.Converter + +var _ = Describe("FakeResourceConverter", func() { + BeforeEach(func() { + converter = conversion.NewFakeResourceConverter(fakeResourceUpConverter{}, fakeResourceDownConverter{}) + }) + + Describe("Convert", func() { + It("works for noop conversions", func() { + src := &v1alpha1.FakeResource{Metadata: core.Metadata{Name: "test"}} + dst := &v1alpha1.FakeResource{} + err := converter.Convert(src, dst) + Expect(err).NotTo(HaveOccurred()) + Expect(dst.GetMetadata().Name).To(Equal("test")) + }) + + It("converts all the way up", func() { + src := &v1alpha1.FakeResource{} + dst := &v1.FakeResource{} + err := converter.Convert(src, dst) + Expect(err).NotTo(HaveOccurred()) + Expect(dst.GetMetadata().Name).To(Equal("v1")) + }) + + It("converts all the way down", func() { + src := &v1.FakeResource{} + dst := &v1alpha1.FakeResource{} + err := converter.Convert(src, dst) + Expect(err).NotTo(HaveOccurred()) + Expect(dst.GetMetadata().Name).To(Equal("v1alpha1")) + }) + }) +}) + +type fakeResourceUpConverter struct{} + +func (fakeResourceUpConverter) FromV1Alpha1ToV1(src *v1alpha1.FakeResource) *v1.FakeResource { + return &v1.FakeResource{Metadata: core.Metadata{Name: "v1"}} +} + +type fakeResourceDownConverter struct{} + +func (fakeResourceDownConverter) FromV1ToV1Alpha1(src *v1.FakeResource) *v1alpha1.FakeResource { + return &v1alpha1.FakeResource{Metadata: core.Metadata{Name: "v1alpha1"}} +} + +var _ = Describe("MockResourceConverter", func() { + BeforeEach(func() { + converter = conversion.NewMockResourceConverter(mockResourceUpConverter{}, mockResourceDownConverter{}) + }) + + 
Describe("Convert", func() { + It("works for noop conversions", func() { + src := &v1alpha1.MockResource{Metadata: core.Metadata{Name: "test"}} + dst := &v1alpha1.MockResource{} + err := converter.Convert(src, dst) + Expect(err).NotTo(HaveOccurred()) + Expect(dst.GetMetadata().Name).To(Equal("test")) + }) + + It("converts all the way up", func() { + src := &v1alpha1.MockResource{} + dst := &v2alpha1.MockResource{} + err := converter.Convert(src, dst) + Expect(err).NotTo(HaveOccurred()) + Expect(dst.GetMetadata().Name).To(Equal("v2alpha1")) + }) + + It("converts all the way down", func() { + src := &v2alpha1.MockResource{} + dst := &v1alpha1.MockResource{} + err := converter.Convert(src, dst) + Expect(err).NotTo(HaveOccurred()) + Expect(dst.GetMetadata().Name).To(Equal("v1alpha1")) + }) + }) +}) + +type mockResourceUpConverter struct{} + +func (mockResourceUpConverter) FromV1Alpha1ToV1(src *v1alpha1.MockResource) *v1.MockResource { + return &v1.MockResource{Metadata: core.Metadata{Name: "v1"}} +} +func (mockResourceUpConverter) FromV1ToV2Alpha1(src *v1.MockResource) *v2alpha1.MockResource { + return &v2alpha1.MockResource{Metadata: core.Metadata{Name: "v2alpha1"}} +} + +type mockResourceDownConverter struct{} + +func (mockResourceDownConverter) FromV1ToV1Alpha1(src *v1.MockResource) *v1alpha1.MockResource { + return &v1alpha1.MockResource{Metadata: core.Metadata{Name: "v1alpha1"}} +} +func (mockResourceDownConverter) FromV2Alpha1ToV1(src *v2alpha1.MockResource) *v1.MockResource { + return &v1.MockResource{Metadata: core.Metadata{Name: "v1"}} +} diff --git a/test/mocks/docs/github.com/solo-io/solo-kit/api/external/envoy/api/v2/core/base.proto.sk.md b/test/mocks/docs/github.com/solo-io/solo-kit/api/external/envoy/api/v2/core/base.proto.sk.md new file mode 100644 index 000000000..d1b9d73bd --- /dev/null +++ b/test/mocks/docs/github.com/solo-io/solo-kit/api/external/envoy/api/v2/core/base.proto.sk.md @@ -0,0 +1,367 @@ + + +### Package: `envoy.api.v2.core` +#### Types: + 
+ +- [Locality](#Locality) +- [Node](#Node) +- [Metadata](#Metadata) +- [RuntimeUInt32](#RuntimeUInt32) +- [HeaderValue](#HeaderValue) +- [HeaderValueOption](#HeaderValueOption) +- [HeaderMap](#HeaderMap) +- [DataSource](#DataSource) +- [TransportSocket](#TransportSocket) +- [SocketOption](#SocketOption) +- [SocketState](#SocketState) +- [RuntimeFractionalPercent](#RuntimeFractionalPercent) +- [ControlPlane](#ControlPlane) + + + + +##### Enums: + + + - [RoutingPriority](#RoutingPriority) + - [RequestMethod](#RequestMethod) + + + +##### Source File: [github.com/solo-io/solo-kit/api/external/envoy/api/v2/core/base.proto](https://github.com/solo-io/solo-kit/blob/master/api/external/envoy/api/v2/core/base.proto) + + + + + +--- +### Locality + + +Identifies location of where either Envoy runs or where upstream hosts run. + +```yaml +"region": string +"zone": string +"subZone": string + +``` + +| Field | Type | Description | Default | +| ----- | ---- | ----------- |----------- | +| `region` | `string` | Region this :ref:`zone ` belongs to. | | +| `zone` | `string` | Defines the local service zone where Envoy is running. Though optional, it should be set if discovery service routing is used and the discovery service exposes :ref:`zone data `, either in this message or via :option:`--service-zone`. The meaning of zone is context dependent, e.g. `Availability Zone (AZ) `_ on AWS, `Zone `_ on GCP, etc. | | +| `subZone` | `string` | When used for locality of upstream hosts, this field further splits zone into smaller chunks of sub-zones so they can be load balanced independently. | | + + + + +--- +### Node + + +Identifies a specific Envoy instance. The node identifier is presented to the +management server, which may use this identifier to distinguish per Envoy +configuration for serving. 
+ +```yaml +"id": string +"cluster": string +"metadata": .google.protobuf.Struct +"locality": .envoy.api.v2.core.Locality +"buildVersion": string + +``` + +| Field | Type | Description | Default | +| ----- | ---- | ----------- |----------- | +| `id` | `string` | An opaque node identifier for the Envoy node. This also provides the local service node name. It should be set if any of the following features are used: :ref:`statsd `, :ref:`CDS `, and :ref:`HTTP tracing `, either in this message or via :option:`--service-node`. | | +| `cluster` | `string` | Defines the local service cluster name where Envoy is running. Though optional, it should be set if any of the following features are used: :ref:`statsd `, :ref:`health check cluster verification `, :ref:`runtime override directory `, :ref:`user agent addition `, :ref:`HTTP global rate limiting `, :ref:`CDS `, and :ref:`HTTP tracing `, either in this message or via :option:`--service-cluster`. | | +| `metadata` | [.google.protobuf.Struct](https://developers.google.com/protocol-buffers/docs/reference/csharp/class/google/protobuf/well-known-types/struct) | Opaque metadata extending the node identifier. Envoy will pass this directly to the management server. | | +| `locality` | [.envoy.api.v2.core.Locality](base.proto.sk.md#Locality) | Locality specifying where the Envoy instance is running. | | +| `buildVersion` | `string` | This is motivated by informing a management server during canary which version of Envoy is being tested in a heterogeneous fleet. This will be set by Envoy in management server RPCs. | | + + + + +--- +### Metadata + + +Metadata provides additional inputs to filters based on matched listeners, +filter chains, routes and endpoints. It is structured as a map, usually from +filter name (in reverse DNS format) to metadata specific to the filter. Metadata +key-values for a filter are merged as connection and request handling occurs, +with later values for the same key overriding earlier values. 
+ +An example use of metadata is providing additional values to +http_connection_manager in the envoy.http_connection_manager.access_log +namespace. + +Another example use of metadata is to per service config info in cluster metadata, which may get +consumed by multiple filters. + +For load balancing, Metadata provides a means to subset cluster endpoints. +Endpoints have a Metadata object associated and routes contain a Metadata +object to match against. There are some well defined metadata used today for +this purpose: + +* ``{"envoy.lb": {"canary": }}`` This indicates the canary status of an + endpoint and is also used during header processing + (x-envoy-upstream-canary) and for stats purposes. + +```yaml +"filterMetadata": map + +``` + +| Field | Type | Description | Default | +| ----- | ---- | ----------- |----------- | +| `filterMetadata` | `map` | Key is the reverse DNS filter name, e.g. com.acme.widget. The envoy.* namespace is reserved for Envoy's built-in filters. | | + + + + +--- +### RuntimeUInt32 + + +Runtime derived uint32 with a default when not specified. + +```yaml +"defaultValue": int +"runtimeKey": string + +``` + +| Field | Type | Description | Default | +| ----- | ---- | ----------- |----------- | +| `defaultValue` | `int` | Default value if runtime value is not available. | | +| `runtimeKey` | `string` | Runtime key to get value for comparison. This value is used if defined. | | + + + + +--- +### HeaderValue + + +Header name/value pair. + +```yaml +"key": string +"value": string + +``` + +| Field | Type | Description | Default | +| ----- | ---- | ----------- |----------- | +| `key` | `string` | Header name. | | +| `value` | `string` | Header value. The same :ref:`format specifier ` as used for :ref:`HTTP access logging ` applies here, however unknown header values are replaced with the empty string instead of `-`. | | + + + + +--- +### HeaderValueOption + + +Header name/value pair plus option to control append behavior. 
+ +```yaml +"header": .envoy.api.v2.core.HeaderValue +"append": .google.protobuf.BoolValue + +``` + +| Field | Type | Description | Default | +| ----- | ---- | ----------- |----------- | +| `header` | [.envoy.api.v2.core.HeaderValue](base.proto.sk.md#HeaderValue) | Header name/value pair that this option applies to. | | +| `append` | [.google.protobuf.BoolValue](https://developers.google.com/protocol-buffers/docs/reference/csharp/class/google/protobuf/well-known-types/bool-value) | Should the value be appended? If true (default), the value is appended to existing values. | | + + + + +--- +### HeaderMap + + +Wrapper for a set of headers. + +```yaml +"headers": []envoy.api.v2.core.HeaderValue + +``` + +| Field | Type | Description | Default | +| ----- | ---- | ----------- |----------- | +| `headers` | [[]envoy.api.v2.core.HeaderValue](base.proto.sk.md#HeaderValue) | | | + + + + +--- +### DataSource + + +Data source consisting of either a file or an inline value. + +```yaml +"filename": string +"inlineBytes": bytes +"inlineString": string + +``` + +| Field | Type | Description | Default | +| ----- | ---- | ----------- |----------- | +| `filename` | `string` | Local filesystem data source. | | +| `inlineBytes` | `bytes` | Bytes inlined in the configuration. | | +| `inlineString` | `string` | String inlined in the configuration. | | + + + + +--- +### TransportSocket + + +Configuration for transport socket in :ref:`listeners ` and +:ref:`clusters `. If the configuration is +empty, a default transport socket implementation and configuration will be +chosen based on the platform and existence of tls_context. + +```yaml +"name": string +"config": .google.protobuf.Struct +"typedConfig": .google.protobuf.Any + +``` + +| Field | Type | Description | Default | +| ----- | ---- | ----------- |----------- | +| `name` | `string` | The name of the transport socket to instantiate. The name must match a supported transport socket implementation. 
| | +| `config` | [.google.protobuf.Struct](https://developers.google.com/protocol-buffers/docs/reference/csharp/class/google/protobuf/well-known-types/struct) | | | +| `typedConfig` | [.google.protobuf.Any](https://developers.google.com/protocol-buffers/docs/reference/csharp/class/google/protobuf/well-known-types/any) | | | + + + + +--- +### SocketOption + + +Generic socket option message. This would be used to set socket options that +might not exist in upstream kernels or precompiled Envoy binaries. + +```yaml +"description": string +"level": int +"name": int +"intValue": int +"bufValue": bytes +"state": .envoy.api.v2.core.SocketOption.SocketState + +``` + +| Field | Type | Description | Default | +| ----- | ---- | ----------- |----------- | +| `description` | `string` | An optional name to give this socket option for debugging, etc. Uniqueness is not required and no special meaning is assumed. | | +| `level` | `int` | Corresponding to the level value passed to setsockopt, such as IPPROTO_TCP | | +| `name` | `int` | The numeric name as passed to setsockopt | | +| `intValue` | `int` | Because many sockopts take an int value. | | +| `bufValue` | `bytes` | Otherwise it's a byte buffer. | | +| `state` | [.envoy.api.v2.core.SocketOption.SocketState](base.proto.sk.md#SocketState) | The state in which the option will be applied. When used in BindConfig STATE_PREBIND is currently the only valid value. | | + + + + +--- +### SocketState + + + +| Name | Description | +| ----- | ----------- | +| `STATE_PREBIND` | Socket options are applied after socket creation but before binding the socket to a port | +| `STATE_BOUND` | Socket options are applied after binding the socket to a port but before calling listen() | +| `STATE_LISTENING` | Socket options are applied after calling listen() | + + + + +--- +### RuntimeFractionalPercent + + +Runtime derived FractionalPercent with defaults for when the numerator or denominator is not +specified via a runtime key. 
+ +```yaml +"defaultValue": .envoy.type.FractionalPercent +"runtimeKey": string + +``` + +| Field | Type | Description | Default | +| ----- | ---- | ----------- |----------- | +| `defaultValue` | [.envoy.type.FractionalPercent](../../../type/percent.proto.sk.md#FractionalPercent) | Default value if the runtime value's for the numerator/denominator keys are not available. | | +| `runtimeKey` | `string` | Runtime key for a YAML representation of a FractionalPercent. | | + + + + +--- +### ControlPlane + + +Identifies a specific ControlPlane instance that Envoy is connected to. + +```yaml +"identifier": string + +``` + +| Field | Type | Description | Default | +| ----- | ---- | ----------- |----------- | +| `identifier` | `string` | An opaque control plane identifier that uniquely identifies an instance of control plane. This can be used to identify which control plane instance, the Envoy is connected to. | | + + + + +### RoutingPriority + +Description: Envoy supports :ref:`upstream priority routing +` both at the route and the virtual +cluster level. The current priority implementation uses different connection +pool and circuit breaking settings for each priority level. This means that +even for HTTP/2 requests, two physical connections will be used to an +upstream host. In the future Envoy will likely support true HTTP/2 priority +over a single upstream connection. + +| Name | Description | +| ----- | ----------- | +| DEFAULT | | +| HIGH | | + +### RequestMethod + +Description: HTTP request method. 
+ +| Name | Description | +| ----- | ----------- | +| METHOD_UNSPECIFIED | | +| GET | | +| HEAD | | +| POST | | +| PUT | | +| DELETE | | +| CONNECT | | +| OPTIONS | | +| TRACE | | + + + + + diff --git a/test/mocks/docs/github.com/solo-io/solo-kit/api/external/envoy/api/v2/discovery.proto.sk.md b/test/mocks/docs/github.com/solo-io/solo-kit/api/external/envoy/api/v2/discovery.proto.sk.md new file mode 100644 index 000000000..e57357ab6 --- /dev/null +++ b/test/mocks/docs/github.com/solo-io/solo-kit/api/external/envoy/api/v2/discovery.proto.sk.md @@ -0,0 +1,179 @@ + + +### Package: `envoy.api.v2` +#### Types: + + +- [DiscoveryRequest](#DiscoveryRequest) +- [DiscoveryResponse](#DiscoveryResponse) +- [DeltaDiscoveryRequest](#DeltaDiscoveryRequest) +- [DeltaDiscoveryResponse](#DeltaDiscoveryResponse) +- [Resource](#Resource) + + + + +##### Source File: [github.com/solo-io/solo-kit/api/external/envoy/api/v2/discovery.proto](https://github.com/solo-io/solo-kit/blob/master/api/external/envoy/api/v2/discovery.proto) + + + + + +--- +### DiscoveryRequest + + +A DiscoveryRequest requests a set of versioned resources of the same type for +a given Envoy node on some API. + +```yaml +"versionInfo": string +"node": .envoy.api.v2.core.Node +"resourceNames": []string +"typeUrl": string +"responseNonce": string +"errorDetail": .google.rpc.Status + +``` + +| Field | Type | Description | Default | +| ----- | ---- | ----------- |----------- | +| `versionInfo` | `string` | The version_info provided in the request messages will be the version_info received with the most recent successfully processed response or empty on the first request. It is expected that no new request is sent after a response is received until the Envoy instance is ready to ACK/NACK the new configuration. ACK/NACK takes place by returning the new API config version as applied or the previous API config version respectively. Each type_url (see below) has an independent version associated with it. 
| | +| `node` | [.envoy.api.v2.core.Node](core/base.proto.sk.md#Node) | The node making the request. | | +| `resourceNames` | `[]string` | List of resources to subscribe to, e.g. list of cluster names or a route configuration name. If this is empty, all resources for the API are returned. LDS/CDS expect empty resource_names, since this is global discovery for the Envoy instance. The LDS and CDS responses will then imply a number of resources that need to be fetched via EDS/RDS, which will be explicitly enumerated in resource_names. | | +| `typeUrl` | `string` | Type of the resource that is being requested, e.g. "type.googleapis.com/envoy.api.v2.ClusterLoadAssignment". This is implicit in requests made via singleton xDS APIs such as CDS, LDS, etc. but is required for ADS. | | +| `responseNonce` | `string` | nonce corresponding to DiscoveryResponse being ACK/NACKed. See above discussion on version_info and the DiscoveryResponse nonce comment. This may be empty if no nonce is available, e.g. at startup or for non-stream xDS implementations. | | +| `errorDetail` | [.google.rpc.Status](../../../google/rpc/status.proto.sk.md#Status) | This is populated when the previous :ref:`DiscoveryResponse ` failed to update configuration. The *message* field in *error_details* provides the Envoy internal exception related to the failure. It is only intended for consumption during manual debugging, the string provided is not guaranteed to be stable across Envoy versions. | | + + + + +--- +### DiscoveryResponse + + + +```yaml +"versionInfo": string +"resources": []google.protobuf.Any +"canary": bool +"typeUrl": string +"nonce": string +"controlPlane": .envoy.api.v2.core.ControlPlane + +``` + +| Field | Type | Description | Default | +| ----- | ---- | ----------- |----------- | +| `versionInfo` | `string` | The version of the response data. | | +| `resources` | [[]google.protobuf.Any](../../../google/protobuf/any.proto.sk.md#Any) | The response resources. 
These resources are typed and depend on the API being called. | | +| `canary` | `bool` | [#not-implemented-hide:] Canary is used to support two Envoy command line flags: * --terminate-on-canary-transition-failure. When set, Envoy is able to terminate if it detects that configuration is stuck at canary. Consider this example sequence of updates: - Management server applies a canary config successfully. - Management server rolls back to a production config. - Envoy rejects the new production config. Since there is no sensible way to continue receiving configuration updates, Envoy will then terminate and apply production config from a clean slate. * --dry-run-canary. When set, a canary response will never be applied, only validated via a dry run. | | +| `typeUrl` | `string` | Type URL for resources. This must be consistent with the type_url in the Any messages for resources if resources is non-empty. This effectively identifies the xDS API when muxing over ADS. | | +| `nonce` | `string` | For gRPC based subscriptions, the nonce provides a way to explicitly ack a specific DiscoveryResponse in a following DiscoveryRequest. Additional messages may have been sent by Envoy to the management server for the previous version on the stream prior to this DiscoveryResponse, that were unprocessed at response send time. The nonce allows the management server to ignore any further DiscoveryRequests for the previous version until a DiscoveryRequest bearing the nonce. The nonce is optional and is not required for non-stream based xDS implementations. | | +| `controlPlane` | [.envoy.api.v2.core.ControlPlane](core/base.proto.sk.md#ControlPlane) | [#not-implemented-hide:] The control plane instance that sent the response. | | + + + + +--- +### DeltaDiscoveryRequest + + +DeltaDiscoveryRequest and DeltaDiscoveryResponse are used in a new gRPC +endpoint for Delta xDS. + +With Delta xDS, the DeltaDiscoveryResponses do not need to include a full +snapshot of the tracked resources. 
Instead, DeltaDiscoveryResponses are a +diff to the state of a xDS client. +In Delta XDS there are per resource versions, which allow tracking state at +the resource granularity. +An xDS Delta session is always in the context of a gRPC bidirectional +stream. This allows the xDS server to keep track of the state of xDS clients +connected to it. + +In Delta xDS the nonce field is required and used to pair +DeltaDiscoveryResponse to a DeltaDiscoveryRequest ACK or NACK. +Optionally, a response message level system_version_info is present for +debugging purposes only. + +DeltaDiscoveryRequest can be sent in 3 situations: + 1. Initial message in a xDS bidirectional gRPC stream. + 2. As a ACK or NACK response to a previous DeltaDiscoveryResponse. + In this case the response_nonce is set to the nonce value in the Response. + ACK or NACK is determined by the absence or presence of error_detail. + 3. Spontaneous DeltaDiscoveryRequest from the client. + This can be done to dynamically add or remove elements from the tracked + resource_names set. In this case response_nonce must be omitted. + +```yaml +"node": .envoy.api.v2.core.Node +"typeUrl": string +"resourceNamesSubscribe": []string +"resourceNamesUnsubscribe": []string +"initialResourceVersions": map +"responseNonce": string +"errorDetail": .google.rpc.Status + +``` + +| Field | Type | Description | Default | +| ----- | ---- | ----------- |----------- | +| `node` | [.envoy.api.v2.core.Node](core/base.proto.sk.md#Node) | The node making the request. | | +| `typeUrl` | `string` | Type of the resource that is being requested, e.g. "type.googleapis.com/envoy.api.v2.ClusterLoadAssignment". This is implicit in requests made via singleton xDS APIs such as CDS, LDS, etc. but is required for ADS. | | +| `resourceNamesSubscribe` | `[]string` | DeltaDiscoveryRequests allow the client to add or remove individual resources to the set of tracked resources in the context of a stream. 
All resource names in the resource_names_subscribe list are added to the set of tracked resources and all resource names in the resource_names_unsubscribe list are removed from the set of tracked resources. Unlike in state-of-the-world xDS, an empty resource_names_subscribe or resource_names_unsubscribe list simply means that no resources are to be added or removed to the resource list. The xDS server must send updates for all tracked resources but can also send updates for resources the client has not subscribed to. This behavior is similar to state-of-the-world xDS. These two fields can be set for all types of DeltaDiscoveryRequests (initial, ACK/NACK or spontaneous). A list of Resource names to add to the list of tracked resources. | | +| `resourceNamesUnsubscribe` | `[]string` | A list of Resource names to remove from the list of tracked resources. | | +| `initialResourceVersions` | `map` | This map must be populated when the DeltaDiscoveryRequest is the first in a stream (assuming there are any resources - this field's purpose is to enable a session to continue in a reconnected gRPC stream, and so will not be used in the very first stream of a session). The keys are the resource names of the xDS resources known to the xDS client. The values in the map are the associated resource level version info. | | +| `responseNonce` | `string` | When the DeltaDiscoveryRequest is an ACK or NACK message in response to a previous DeltaDiscoveryResponse, the response_nonce must be the nonce in the DeltaDiscoveryResponse. Otherwise response_nonce must be omitted. | | +| `errorDetail` | [.google.rpc.Status](../../../google/rpc/status.proto.sk.md#Status) | This is populated when the previous :ref:`DiscoveryResponse ` failed to update configuration. The *message* field in *error_details* provides the Envoy internal exception related to the failure. 
| | + + + + +--- +### DeltaDiscoveryResponse + + + +```yaml +"systemVersionInfo": string +"resources": []envoy.api.v2.Resource +"removedResources": []string +"nonce": string + +``` + +| Field | Type | Description | Default | +| ----- | ---- | ----------- |----------- | +| `systemVersionInfo` | `string` | The version of the response data (used for debugging). | | +| `resources` | [[]envoy.api.v2.Resource](discovery.proto.sk.md#Resource) | The response resources. These are typed resources that match the type url in the DeltaDiscoveryRequest. | | +| `removedResources` | `[]string` | Resource names of resources that have been deleted and are to be removed from the xDS Client. Removed resources for missing resources can be ignored. | | +| `nonce` | `string` | The nonce provides a way for DeltaDiscoveryRequests to uniquely reference a DeltaDiscoveryResponse. The nonce is required. | | + + + + +--- +### Resource + + + +```yaml +"name": string +"version": string +"resource": .google.protobuf.Any + +``` + +| Field | Type | Description | Default | +| ----- | ---- | ----------- |----------- | +| `name` | `string` | The resource's name, to distinguish it from others of the same type of resource. | | +| `version` | `string` | The resource level version. It allows xDS to track the state of individual resources. | | +| `resource` | [.google.protobuf.Any](https://developers.google.com/protocol-buffers/docs/reference/csharp/class/google/protobuf/well-known-types/any) | The resource being tracked. 
| | + + + + + + + + diff --git a/test/mocks/docs/github.com/solo-io/solo-kit/api/external/envoy/type/percent.proto.sk.md b/test/mocks/docs/github.com/solo-io/solo-kit/api/external/envoy/type/percent.proto.sk.md new file mode 100644 index 000000000..756d49b18 --- /dev/null +++ b/test/mocks/docs/github.com/solo-io/solo-kit/api/external/envoy/type/percent.proto.sk.md @@ -0,0 +1,81 @@ + + +### Package: `envoy.type` +#### Types: + + +- [Percent](#Percent) +- [FractionalPercent](#FractionalPercent) +- [DenominatorType](#DenominatorType) + + + + +##### Source File: [github.com/solo-io/solo-kit/api/external/envoy/type/percent.proto](https://github.com/solo-io/solo-kit/blob/master/api/external/envoy/type/percent.proto) + + + + + +--- +### Percent + + +Identifies a percentage, in the range [0.0, 100.0]. + +```yaml +"value": float + +``` + +| Field | Type | Description | Default | +| ----- | ---- | ----------- |----------- | +| `value` | `float` | | | + + + + +--- +### FractionalPercent + + +A fractional percentage is used in cases in which for performance reasons performing floating +point to integer conversions during randomness calculations is undesirable. The message includes +both a numerator and denominator that together determine the final fractional value. + +* **Example**: 1/100 = 1%. +* **Example**: 3/10000 = 0.03%. + +```yaml +"numerator": int +"denominator": .envoy.type.FractionalPercent.DenominatorType + +``` + +| Field | Type | Description | Default | +| ----- | ---- | ----------- |----------- | +| `numerator` | `int` | Specifies the numerator. Defaults to 0. | | +| `denominator` | [.envoy.type.FractionalPercent.DenominatorType](percent.proto.sk.md#DenominatorType) | Specifies the denominator. If the denominator specified is less than the numerator, the final fractional percentage is capped at 1 (100%). | | + + + + +--- +### DenominatorType + + +Fraction percentages support several fixed denominator values. 
+ +| Name | Description | +| ----- | ----------- | +| `HUNDRED` | 100. **Example**: 1/100 = 1%. | +| `TEN_THOUSAND` | 10,000. **Example**: 1/10000 = 0.01%. | +| `MILLION` | 1,000,000. **Example**: 1/1000000 = 0.0001%. | + + + + + + + + diff --git a/test/mocks/docs/github.com/solo-io/solo-kit/api/external/gogoproto/gogo.proto.sk.md b/test/mocks/docs/github.com/solo-io/solo-kit/api/external/gogoproto/gogo.proto.sk.md new file mode 100644 index 000000000..d42b69d5f --- /dev/null +++ b/test/mocks/docs/github.com/solo-io/solo-kit/api/external/gogoproto/gogo.proto.sk.md @@ -0,0 +1,44 @@ + + +### Package: `gogoproto` +Protocol Buffers for Go with Gadgets + +Copyright (c) 2013, The GoGo Authors. All rights reserved. +http://github.com/gogo/protobuf + +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are +met: + + * Redistributions of source code must retain the above copyright +notice, this list of conditions and the following disclaimer. + * Redistributions in binary form must reproduce the above +copyright notice, this list of conditions and the following disclaimer +in the documentation and/or other materials provided with the +distribution. + +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT +OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + + + + +##### Source File: [github.com/solo-io/solo-kit/api/external/gogoproto/gogo.proto](https://github.com/solo-io/solo-kit/blob/master/api/external/gogoproto/gogo.proto) + + + + + + + + + diff --git a/test/mocks/docs/github.com/solo-io/solo-kit/api/external/google/api/annotations.proto.sk.md b/test/mocks/docs/github.com/solo-io/solo-kit/api/external/google/api/annotations.proto.sk.md new file mode 100644 index 000000000..0a4e481a4 --- /dev/null +++ b/test/mocks/docs/github.com/solo-io/solo-kit/api/external/google/api/annotations.proto.sk.md @@ -0,0 +1,30 @@ + + +### Package: `google.api` +Copyright (c) 2015, Google Inc. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+ + + + +##### Source File: [github.com/solo-io/solo-kit/api/external/google/api/annotations.proto](https://github.com/solo-io/solo-kit/blob/master/api/external/google/api/annotations.proto) + + + + + + + + + diff --git a/test/mocks/docs/github.com/solo-io/solo-kit/api/external/google/api/http.proto.sk.md b/test/mocks/docs/github.com/solo-io/solo-kit/api/external/google/api/http.proto.sk.md new file mode 100644 index 000000000..9a8ca8c60 --- /dev/null +++ b/test/mocks/docs/github.com/solo-io/solo-kit/api/external/google/api/http.proto.sk.md @@ -0,0 +1,331 @@ + + +### Package: `google.api` +Copyright 2018 Google LLC + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. + + + +#### Types: + + +- [Http](#Http) +- [HttpRule](#HttpRule) +- [CustomHttpPattern](#CustomHttpPattern) + + + + +##### Source File: [github.com/solo-io/solo-kit/api/external/google/api/http.proto](https://github.com/solo-io/solo-kit/blob/master/api/external/google/api/http.proto) + + + + + +--- +### Http + + +Defines the HTTP configuration for an API service. It contains a list of +[HttpRule][google.api.HttpRule], each specifying the mapping of an RPC method +to one or more HTTP REST API methods. + +```yaml +"rules": []google.api.HttpRule +"fullyDecodeReservedExpansion": bool + +``` + +| Field | Type | Description | Default | +| ----- | ---- | ----------- |----------- | +| `rules` | [[]google.api.HttpRule](http.proto.sk.md#HttpRule) | A list of HTTP configuration rules that apply to individual API methods. 
**NOTE:** All service configuration rules follow "last one wins" order. | | +| `fullyDecodeReservedExpansion` | `bool` | When set to true, URL path parameters will be fully URI-decoded except in cases of single segment matches in reserved expansion, where "%2F" will be left encoded. The default behavior is to not decode RFC 6570 reserved characters in multi segment matches. | | + + + + +--- +### HttpRule + + +`HttpRule` defines the mapping of an RPC method to one or more HTTP +REST API methods. The mapping specifies how different portions of the RPC +request message are mapped to URL path, URL query parameters, and +HTTP request body. The mapping is typically specified as an +`google.api.http` annotation on the RPC method, +see "google/api/annotations.proto" for details. + +The mapping consists of a field specifying the path template and +method kind. The path template can refer to fields in the request +message, as in the example below which describes a REST GET +operation on a resource collection of messages: + + + service Messaging { + rpc GetMessage(GetMessageRequest) returns (Message) { + option (google.api.http).get = "/v1/messages/{message_id}/{sub.subfield}"; + } + } + message GetMessageRequest { + message SubMessage { + string subfield = 1; + } + string message_id = 1; // mapped to the URL + SubMessage sub = 2; // `sub.subfield` is url-mapped + } + message Message { + string text = 1; // content of the resource + } + +The same http annotation can alternatively be expressed inside the +`GRPC API Configuration` YAML file. + + http: + rules: + - selector: .Messaging.GetMessage + get: /v1/messages/{message_id}/{sub.subfield} + +This definition enables an automatic, bidirectional mapping of HTTP +JSON to RPC. Example: + +HTTP | RPC +-----|----- +`GET /v1/messages/123456/foo` | `GetMessage(message_id: "123456" sub: SubMessage(subfield: "foo"))` + +In general, not only fields but also field paths can be referenced +from a path pattern. 
Fields mapped to the path pattern cannot be +repeated and must have a primitive (non-message) type. + +Any fields in the request message which are not bound by the path +pattern automatically become (optional) HTTP query +parameters. Assume the following definition of the request message: + + + service Messaging { + rpc GetMessage(GetMessageRequest) returns (Message) { + option (google.api.http).get = "/v1/messages/{message_id}"; + } + } + message GetMessageRequest { + message SubMessage { + string subfield = 1; + } + string message_id = 1; // mapped to the URL + int64 revision = 2; // becomes a parameter + SubMessage sub = 3; // `sub.subfield` becomes a parameter + } + + +This enables a HTTP JSON to RPC mapping as below: + +HTTP | RPC +-----|----- +`GET /v1/messages/123456?revision=2&sub.subfield=foo` | `GetMessage(message_id: "123456" revision: 2 sub: SubMessage(subfield: "foo"))` + +Note that fields which are mapped to HTTP parameters must have a +primitive type or a repeated primitive type. Message types are not +allowed. In the case of a repeated type, the parameter can be +repeated in the URL, as in `...?param=A¶m=B`. + +For HTTP method kinds which allow a request body, the `body` field +specifies the mapping. Consider a REST update method on the +message resource collection: + + + service Messaging { + rpc UpdateMessage(UpdateMessageRequest) returns (Message) { + option (google.api.http) = { + put: "/v1/messages/{message_id}" + body: "message" + }; + } + } + message UpdateMessageRequest { + string message_id = 1; // mapped to the URL + Message message = 2; // mapped to the body + } + + +The following HTTP JSON to RPC mapping is enabled, where the +representation of the JSON in the request body is determined by +protos JSON encoding: + +HTTP | RPC +-----|----- +`PUT /v1/messages/123456 { "text": "Hi!" }` | `UpdateMessage(message_id: "123456" message { text: "Hi!" 
})` + +The special name `*` can be used in the body mapping to define that +every field not bound by the path template should be mapped to the +request body. This enables the following alternative definition of +the update method: + + service Messaging { + rpc UpdateMessage(Message) returns (Message) { + option (google.api.http) = { + put: "/v1/messages/{message_id}" + body: "*" + }; + } + } + message Message { + string message_id = 1; + string text = 2; + } + + +The following HTTP JSON to RPC mapping is enabled: + +HTTP | RPC +-----|----- +`PUT /v1/messages/123456 { "text": "Hi!" }` | `UpdateMessage(message_id: "123456" text: "Hi!")` + +Note that when using `*` in the body mapping, it is not possible to +have HTTP parameters, as all fields not bound by the path end in +the body. This makes this option more rarely used in practice of +defining REST APIs. The common usage of `*` is in custom methods +which don't use the URL at all for transferring data. + +It is possible to define multiple HTTP methods for one RPC by using +the `additional_bindings` option. Example: + + service Messaging { + rpc GetMessage(GetMessageRequest) returns (Message) { + option (google.api.http) = { + get: "/v1/messages/{message_id}" + additional_bindings { + get: "/v1/users/{user_id}/messages/{message_id}" + } + }; + } + } + message GetMessageRequest { + string message_id = 1; + string user_id = 2; + } + + +This enables the following two alternative HTTP JSON to RPC +mappings: + +HTTP | RPC +-----|----- +`GET /v1/messages/123456` | `GetMessage(message_id: "123456")` +`GET /v1/users/me/messages/123456` | `GetMessage(user_id: "me" message_id: "123456")` + +# Rules for HTTP mapping + +The rules for mapping HTTP path, query parameters, and body fields +to the request message are as follows: + +1. The `body` field specifies either `*` or a field path, or is + omitted. If omitted, it indicates there is no HTTP request body. +2. 
Leaf fields (recursive expansion of nested messages in the + request) can be classified into three types: + (a) Matched in the URL template. + (b) Covered by body (if body is `*`, everything except (a) fields; + else everything under the body field) + (c) All other fields. +3. URL query parameters found in the HTTP request are mapped to (c) fields. +4. Any body sent with an HTTP request can contain only (b) fields. + +The syntax of the path template is as follows: + + Template = "/" Segments [ Verb ] ; + Segments = Segment { "/" Segment } ; + Segment = "*" | "**" | LITERAL | Variable ; + Variable = "{" FieldPath [ "=" Segments ] "}" ; + FieldPath = IDENT { "." IDENT } ; + Verb = ":" LITERAL ; + +The syntax `*` matches a single path segment. The syntax `**` matches zero +or more path segments, which must be the last part of the path except the +`Verb`. The syntax `LITERAL` matches literal text in the path. + +The syntax `Variable` matches part of the URL path as specified by its +template. A variable template must not contain other variables. If a variable +matches a single path segment, its template may be omitted, e.g. `{var}` +is equivalent to `{var=*}`. + +If a variable contains exactly one path segment, such as `"{var}"` or +`"{var=*}"`, when such a variable is expanded into a URL path, all characters +except `[-_.~0-9a-zA-Z]` are percent-encoded. Such variables show up in the +Discovery Document as `{var}`. + +If a variable contains one or more path segments, such as `"{var=foo/*}"` +or `"{var=**}"`, when such a variable is expanded into a URL path, all +characters except `[-_.~/0-9a-zA-Z]` are percent-encoded. Such variables +show up in the Discovery Document as `{+var}`. + +NOTE: While the single segment variable matches the semantics of +[RFC 6570](https://tools.ietf.org/html/rfc6570) Section 3.2.2 +Simple String Expansion, the multi segment variable **does not** match +RFC 6570 Reserved Expansion. 
The reason is that the Reserved Expansion +does not expand special characters like `?` and `#`, which would lead +to invalid URLs. + +NOTE: the field paths in variables and in the `body` must not refer to +repeated fields or map fields. + +```yaml +"selector": string +"get": string +"put": string +"post": string +"delete": string +"patch": string +"custom": .google.api.CustomHttpPattern +"body": string +"additionalBindings": []google.api.HttpRule + +``` + +| Field | Type | Description | Default | +| ----- | ---- | ----------- |----------- | +| `selector` | `string` | Selects methods to which this rule applies. Refer to [selector][google.api.DocumentationRule.selector] for syntax details. | | +| `get` | `string` | Used for listing and getting information about resources. | | +| `put` | `string` | Used for updating a resource. | | +| `post` | `string` | Used for creating a resource. | | +| `delete` | `string` | Used for deleting a resource. | | +| `patch` | `string` | Used for updating a resource. | | +| `custom` | [.google.api.CustomHttpPattern](http.proto.sk.md#CustomHttpPattern) | The custom pattern is used for specifying an HTTP method that is not included in the `pattern` field, such as HEAD, or "*" to leave the HTTP method unspecified for this rule. The wild-card rule is useful for services that provide content to Web (HTML) clients. | | +| `body` | `string` | The name of the request field whose value is mapped to the HTTP body, or `*` for mapping all fields not captured by the path pattern to the HTTP body. NOTE: the referred field must not be a repeated field and must be present at the top-level of request message type. | | +| `additionalBindings` | [[]google.api.HttpRule](http.proto.sk.md#HttpRule) | Additional HTTP bindings for the selector. Nested bindings must not contain an `additional_bindings` field themselves (that is, the nesting may only be one level deep). 
| | + + + + +--- +### CustomHttpPattern + + +A custom pattern is used for defining custom HTTP verb. + +```yaml +"kind": string +"path": string + +``` + +| Field | Type | Description | Default | +| ----- | ---- | ----------- |----------- | +| `kind` | `string` | The name of this custom HTTP verb. | | +| `path` | `string` | The path matched by this custom verb. | | + + + + + + + + diff --git a/test/mocks/docs/github.com/solo-io/solo-kit/api/external/google/protobuf/any.proto.sk.md b/test/mocks/docs/github.com/solo-io/solo-kit/api/external/google/protobuf/any.proto.sk.md new file mode 100644 index 000000000..4b615defd --- /dev/null +++ b/test/mocks/docs/github.com/solo-io/solo-kit/api/external/google/protobuf/any.proto.sk.md @@ -0,0 +1,141 @@ + + +### Package: `google.protobuf` +Protocol Buffers - Google's data interchange format +Copyright 2008 Google Inc. All rights reserved. +https://developers.google.com/protocol-buffers/ + +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are +met: + + * Redistributions of source code must retain the above copyright +notice, this list of conditions and the following disclaimer. + * Redistributions in binary form must reproduce the above +copyright notice, this list of conditions and the following disclaimer +in the documentation and/or other materials provided with the +distribution. + * Neither the name of Google Inc. nor the names of its +contributors may be used to endorse or promote products derived from +this software without specific prior written permission. + +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT +OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + + + +#### Types: + + +- [Any](#Any) + + + + +##### Source File: [github.com/solo-io/solo-kit/api/external/google/protobuf/any.proto](https://github.com/solo-io/solo-kit/blob/master/api/external/google/protobuf/any.proto) + + + + + +--- +### Any + + +`Any` contains an arbitrary serialized protocol buffer message along with a +URL that describes the type of the serialized message. + +Protobuf library provides support to pack/unpack Any values in the form +of utility functions or additional generated methods of the Any type. + +Example 1: Pack and unpack a message in C++. + + Foo foo = ...; + Any any; + any.PackFrom(foo); + ... + if (any.UnpackTo(&foo)) { + ... + } + +Example 2: Pack and unpack a message in Java. + + Foo foo = ...; + Any any = Any.pack(foo); + ... + if (any.is(Foo.class)) { + foo = any.unpack(Foo.class); + } + + Example 3: Pack and unpack a message in Python. + + foo = Foo(...) + any = Any() + any.Pack(foo) + ... + if any.Is(Foo.DESCRIPTOR): + any.Unpack(foo) + ... + +The pack methods provided by protobuf library will by default use +'type.googleapis.com/full.type.name' as the type URL and the unpack +methods only use the fully qualified type name after the last '/' +in the type URL, for example "foo.bar.com/x/y.z" will yield type +name "y.z". + + +JSON +==== +The JSON representation of an `Any` value uses the regular +representation of the deserialized, embedded message, with an +additional field `@type` which contains the type URL. 
Example: + + package google.profile; + message Person { + string first_name = 1; + string last_name = 2; + } + + { + "@type": "type.googleapis.com/google.profile.Person", + "firstName": , + "lastName": + } + +If the embedded message type is well-known and has a custom JSON +representation, that representation will be embedded adding a field +`value` which holds the custom JSON in addition to the `@type` +field. Example (for message [google.protobuf.Duration][]): + + { + "@type": "type.googleapis.com/google.protobuf.Duration", + "value": "1.212s" + } + +```yaml +"typeUrl": string +"value": bytes + +``` + +| Field | Type | Description | Default | +| ----- | ---- | ----------- |----------- | +| `typeUrl` | `string` | A URL/resource name whose content describes the type of the serialized protocol buffer message. For URLs which use the scheme `http`, `https`, or no scheme, the following restrictions and interpretations apply: * If no scheme is provided, `https` is assumed. * The last segment of the URL's path must represent the fully qualified name of the type (as in `path/google.protobuf.Duration`). The name should be in a canonical form (e.g., leading "." is not accepted). * An HTTP GET on the URL must yield a [google.protobuf.Type][] value in binary format, or produce an error. * Applications are allowed to cache lookup results based on the URL, or have them precompiled into a binary to avoid any lookup. Therefore, binary compatibility needs to be preserved on changes to types. (Use versioned type names to manage breaking changes.) Schemes other than `http`, `https` (or the empty scheme) might be used with implementation specific semantics. | | +| `value` | `bytes` | Must be a valid serialized protocol buffer of the above specified type. 
| | + + + + + + + + diff --git a/test/mocks/docs/github.com/solo-io/solo-kit/api/external/google/protobuf/duration.proto.sk.md b/test/mocks/docs/github.com/solo-io/solo-kit/api/external/google/protobuf/duration.proto.sk.md new file mode 100644 index 000000000..3cfe758ca --- /dev/null +++ b/test/mocks/docs/github.com/solo-io/solo-kit/api/external/google/protobuf/duration.proto.sk.md @@ -0,0 +1,130 @@ + + +### Package: `google.protobuf` +Protocol Buffers - Google's data interchange format +Copyright 2008 Google Inc. All rights reserved. +https://developers.google.com/protocol-buffers/ + +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are +met: + + * Redistributions of source code must retain the above copyright +notice, this list of conditions and the following disclaimer. + * Redistributions in binary form must reproduce the above +copyright notice, this list of conditions and the following disclaimer +in the documentation and/or other materials provided with the +distribution. + * Neither the name of Google Inc. nor the names of its +contributors may be used to endorse or promote products derived from +this software without specific prior written permission. + +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT +OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + + + +#### Types: + + +- [Duration](#Duration) + + + + +##### Source File: [github.com/solo-io/solo-kit/api/external/google/protobuf/duration.proto](https://github.com/solo-io/solo-kit/blob/master/api/external/google/protobuf/duration.proto) + + + + + +--- +### Duration + + +A Duration represents a signed, fixed-length span of time represented +as a count of seconds and fractions of seconds at nanosecond +resolution. It is independent of any calendar and concepts like "day" +or "month". It is related to Timestamp in that the difference between +two Timestamp values is a Duration and it can be added or subtracted +from a Timestamp. Range is approximately +-10,000 years. + +# Examples + +Example 1: Compute Duration from two Timestamps in pseudo code. + + Timestamp start = ...; + Timestamp end = ...; + Duration duration = ...; + + duration.seconds = end.seconds - start.seconds; + duration.nanos = end.nanos - start.nanos; + + if (duration.seconds < 0 && duration.nanos > 0) { + duration.seconds += 1; + duration.nanos -= 1000000000; + } else if (durations.seconds > 0 && duration.nanos < 0) { + duration.seconds -= 1; + duration.nanos += 1000000000; + } + +Example 2: Compute Timestamp from Timestamp + Duration in pseudo code. 
+ + Timestamp start = ...; + Duration duration = ...; + Timestamp end = ...; + + end.seconds = start.seconds + duration.seconds; + end.nanos = start.nanos + duration.nanos; + + if (end.nanos < 0) { + end.seconds -= 1; + end.nanos += 1000000000; + } else if (end.nanos >= 1000000000) { + end.seconds += 1; + end.nanos -= 1000000000; + } + +Example 3: Compute Duration from datetime.timedelta in Python. + + td = datetime.timedelta(days=3, minutes=10) + duration = Duration() + duration.FromTimedelta(td) + +# JSON Mapping + +In JSON format, the Duration type is encoded as a string rather than an +object, where the string ends in the suffix "s" (indicating seconds) and +is preceded by the number of seconds, with nanoseconds expressed as +fractional seconds. For example, 3 seconds with 0 nanoseconds should be +encoded in JSON format as "3s", while 3 seconds and 1 nanosecond should +be expressed in JSON format as "3.000000001s", and 3 seconds and 1 +microsecond should be expressed in JSON format as "3.000001s". + +```yaml +"seconds": int +"nanos": int + +``` + +| Field | Type | Description | Default | +| ----- | ---- | ----------- |----------- | +| `seconds` | `int` | Signed seconds of the span of time. Must be from -315,576,000,000 to +315,576,000,000 inclusive. Note: these bounds are computed from: 60 sec/min * 60 min/hr * 24 hr/day * 365.25 days/year * 10000 years | | +| `nanos` | `int` | Signed fractions of a second at nanosecond resolution of the span of time. Durations less than one second are represented with a 0 `seconds` field and a positive or negative `nanos` field. For durations of one second or more, a non-zero value for the `nanos` field must be of the same sign as the `seconds` field. Must be from -999,999,999 to +999,999,999 inclusive. 
| | + + + + + + + + diff --git a/test/mocks/docs/github.com/solo-io/solo-kit/api/external/google/protobuf/empty.proto.sk.md b/test/mocks/docs/github.com/solo-io/solo-kit/api/external/google/protobuf/empty.proto.sk.md new file mode 100644 index 000000000..de3c38367 --- /dev/null +++ b/test/mocks/docs/github.com/solo-io/solo-kit/api/external/google/protobuf/empty.proto.sk.md @@ -0,0 +1,77 @@ + + +### Package: `google.protobuf` +Protocol Buffers - Google's data interchange format +Copyright 2008 Google Inc. All rights reserved. +https://developers.google.com/protocol-buffers/ + +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are +met: + + * Redistributions of source code must retain the above copyright +notice, this list of conditions and the following disclaimer. + * Redistributions in binary form must reproduce the above +copyright notice, this list of conditions and the following disclaimer +in the documentation and/or other materials provided with the +distribution. + * Neither the name of Google Inc. nor the names of its +contributors may be used to endorse or promote products derived from +this software without specific prior written permission. + +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT +OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + + + +#### Types: + + +- [Empty](#Empty) + + + + +##### Source File: [github.com/solo-io/solo-kit/api/external/google/protobuf/empty.proto](https://github.com/solo-io/solo-kit/blob/master/api/external/google/protobuf/empty.proto) + + + + + +--- +### Empty + + +A generic empty message that you can re-use to avoid defining duplicated +empty messages in your APIs. A typical example is to use it as the request +or the response type of an API method. For instance: + + service Foo { + rpc Bar(google.protobuf.Empty) returns (google.protobuf.Empty); + } + +The JSON representation for `Empty` is empty JSON object `{}`. + +```yaml + +``` + +| Field | Type | Description | Default | +| ----- | ---- | ----------- |----------- | + + + + + + + + diff --git a/test/mocks/docs/github.com/solo-io/solo-kit/api/external/google/protobuf/struct.proto.sk.md b/test/mocks/docs/github.com/solo-io/solo-kit/api/external/google/protobuf/struct.proto.sk.md new file mode 100644 index 000000000..fd94e7dff --- /dev/null +++ b/test/mocks/docs/github.com/solo-io/solo-kit/api/external/google/protobuf/struct.proto.sk.md @@ -0,0 +1,151 @@ + + +### Package: `google.protobuf` +Protocol Buffers - Google's data interchange format +Copyright 2008 Google Inc. All rights reserved. 
+https://developers.google.com/protocol-buffers/ + +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are +met: + + * Redistributions of source code must retain the above copyright +notice, this list of conditions and the following disclaimer. + * Redistributions in binary form must reproduce the above +copyright notice, this list of conditions and the following disclaimer +in the documentation and/or other materials provided with the +distribution. + * Neither the name of Google Inc. nor the names of its +contributors may be used to endorse or promote products derived from +this software without specific prior written permission. + +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + + + +#### Types: + + +- [Struct](#Struct) +- [Value](#Value) +- [ListValue](#ListValue) + + + + +##### Enums: + + + - [NullValue](#NullValue) + + + +##### Source File: [github.com/solo-io/solo-kit/api/external/google/protobuf/struct.proto](https://github.com/solo-io/solo-kit/blob/master/api/external/google/protobuf/struct.proto) + + + + + +--- +### Struct + + +`Struct` represents a structured data value, consisting of fields +which map to dynamically typed values. 
In some languages, `Struct` +might be supported by a native representation. For example, in +scripting languages like JS a struct is represented as an +object. The details of that representation are described together +with the proto support for the language. + +The JSON representation for `Struct` is JSON object. + +```yaml +"fields": map + +``` + +| Field | Type | Description | Default | +| ----- | ---- | ----------- |----------- | +| `fields` | `map` | Unordered map of dynamically typed values. | | + + + + +--- +### Value + + +`Value` represents a dynamically typed value which can be either +null, a number, a string, a boolean, a recursive struct value, or a +list of values. A producer of value is expected to set one of those +variants; absence of any variant indicates an error. + +The JSON representation for `Value` is JSON value. + +```yaml +"nullValue": .google.protobuf.NullValue +"numberValue": float +"stringValue": string +"boolValue": bool +"structValue": .google.protobuf.Struct +"listValue": .google.protobuf.ListValue + +``` + +| Field | Type | Description | Default | +| ----- | ---- | ----------- |----------- | +| `nullValue` | [.google.protobuf.NullValue](https://developers.google.com/protocol-buffers/docs/reference/csharp/class/google/protobuf/well-known-types/null-value) | Represents a null value. | | +| `numberValue` | `float` | Represents a double value. | | +| `stringValue` | `string` | Represents a string value. | | +| `boolValue` | `bool` | Represents a boolean value. | | +| `structValue` | [.google.protobuf.Struct](https://developers.google.com/protocol-buffers/docs/reference/csharp/class/google/protobuf/well-known-types/struct) | Represents a structured value. | | +| `listValue` | [.google.protobuf.ListValue](https://developers.google.com/protocol-buffers/docs/reference/csharp/class/google/protobuf/well-known-types/list-value) | Represents a repeated `Value`.
| | + + + + +--- +### ListValue + + +`ListValue` is a wrapper around a repeated field of values. + +The JSON representation for `ListValue` is JSON array. + +```yaml +"values": []google.protobuf.Value + +``` + +| Field | Type | Description | Default | +| ----- | ---- | ----------- |----------- | +| `values` | [[]google.protobuf.Value](struct.proto.sk.md#Value) | Repeated field of dynamically typed values. | | + + + + +### NullValue + +Description: `NullValue` is a singleton enumeration to represent the null value for the +`Value` type union. + + The JSON representation for `NullValue` is JSON `null`. + +| Name | Description | +| ----- | ----------- | +| NULL_VALUE | Null value. | + + + + + diff --git a/test/mocks/docs/github.com/solo-io/solo-kit/api/external/google/protobuf/timestamp.proto.sk.md b/test/mocks/docs/github.com/solo-io/solo-kit/api/external/google/protobuf/timestamp.proto.sk.md new file mode 100644 index 000000000..91f6c55ac --- /dev/null +++ b/test/mocks/docs/github.com/solo-io/solo-kit/api/external/google/protobuf/timestamp.proto.sk.md @@ -0,0 +1,148 @@ + + +### Package: `google.protobuf` +Protocol Buffers - Google's data interchange format +Copyright 2008 Google Inc. All rights reserved. +https://developers.google.com/protocol-buffers/ + +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are +met: + + * Redistributions of source code must retain the above copyright +notice, this list of conditions and the following disclaimer. + * Redistributions in binary form must reproduce the above +copyright notice, this list of conditions and the following disclaimer +in the documentation and/or other materials provided with the +distribution. + * Neither the name of Google Inc. nor the names of its +contributors may be used to endorse or promote products derived from +this software without specific prior written permission. 
+ +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + + + +#### Types: + + +- [Timestamp](#Timestamp) + + + + +##### Source File: [github.com/solo-io/solo-kit/api/external/google/protobuf/timestamp.proto](https://github.com/solo-io/solo-kit/blob/master/api/external/google/protobuf/timestamp.proto) + + + + + +--- +### Timestamp + + +A Timestamp represents a point in time independent of any time zone +or calendar, represented as seconds and fractions of seconds at +nanosecond resolution in UTC Epoch time. It is encoded using the +Proleptic Gregorian Calendar which extends the Gregorian calendar +backwards to year one. It is encoded assuming all minutes are 60 +seconds long, i.e. leap seconds are "smeared" so that no leap second +table is needed for interpretation. Range is from +0001-01-01T00:00:00Z to 9999-12-31T23:59:59.999999999Z. +By restricting to that range, we ensure that we can convert to +and from RFC 3339 date strings. +See [https://www.ietf.org/rfc/rfc3339.txt](https://www.ietf.org/rfc/rfc3339.txt). + +# Examples + +Example 1: Compute Timestamp from POSIX `time()`. + + Timestamp timestamp; + timestamp.set_seconds(time(NULL)); + timestamp.set_nanos(0); + +Example 2: Compute Timestamp from POSIX `gettimeofday()`. 
+ + struct timeval tv; + gettimeofday(&tv, NULL); + + Timestamp timestamp; + timestamp.set_seconds(tv.tv_sec); + timestamp.set_nanos(tv.tv_usec * 1000); + +Example 3: Compute Timestamp from Win32 `GetSystemTimeAsFileTime()`. + + FILETIME ft; + GetSystemTimeAsFileTime(&ft); + UINT64 ticks = (((UINT64)ft.dwHighDateTime) << 32) | ft.dwLowDateTime; + + // A Windows tick is 100 nanoseconds. Windows epoch 1601-01-01T00:00:00Z + // is 11644473600 seconds before Unix epoch 1970-01-01T00:00:00Z. + Timestamp timestamp; + timestamp.set_seconds((INT64) ((ticks / 10000000) - 11644473600LL)); + timestamp.set_nanos((INT32) ((ticks % 10000000) * 100)); + +Example 4: Compute Timestamp from Java `System.currentTimeMillis()`. + + long millis = System.currentTimeMillis(); + + Timestamp timestamp = Timestamp.newBuilder().setSeconds(millis / 1000) + .setNanos((int) ((millis % 1000) * 1000000)).build(); + + +Example 5: Compute Timestamp from current time in Python. + + timestamp = Timestamp() + timestamp.GetCurrentTime() + +# JSON Mapping + +In JSON format, the Timestamp type is encoded as a string in the +[RFC 3339](https://www.ietf.org/rfc/rfc3339.txt) format. That is, the +format is "{year}-{month}-{day}T{hour}:{min}:{sec}[.{frac_sec}]Z" +where {year} is always expressed using four digits while {month}, {day}, +{hour}, {min}, and {sec} are zero-padded to two digits each. The fractional +seconds, which can go up to 9 digits (i.e. up to 1 nanosecond resolution), +are optional. The "Z" suffix indicates the timezone ("UTC"); the timezone +is required, though only UTC (as indicated by "Z") is presently supported. + +For example, "2017-01-15T01:30:15.01Z" encodes 15.01 seconds past +01:30 UTC on January 15, 2017. + +In JavaScript, one can convert a Date object to this format using the +standard [toISOString()](https://developer.mozilla.org/en-US/docs/Web/JavaScript/Reference/Global_Objects/Date/toISOString) +method.
In Python, a standard `datetime.datetime` object can be converted +to this format using [`strftime`](https://docs.python.org/2/library/time.html#time.strftime) +with the time format spec '%Y-%m-%dT%H:%M:%S.%fZ'. Likewise, in Java, one +can use the Joda Time's [`ISODateTimeFormat.dateTime()`]( +http://joda-time.sourceforge.net/apidocs/org/joda/time/format/ISODateTimeFormat.html#dateTime()) +to obtain a formatter capable of generating timestamps in this format. + +```yaml +"seconds": int +"nanos": int + +``` + +| Field | Type | Description | Default | +| ----- | ---- | ----------- |----------- | +| `seconds` | `int` | Represents seconds of UTC time since Unix epoch 1970-01-01T00:00:00Z. Must be from 0001-01-01T00:00:00Z to 9999-12-31T23:59:59Z inclusive. | | +| `nanos` | `int` | Non-negative fractions of a second at nanosecond resolution. Negative second values with fractions must still have non-negative nanos values that count forward in time. Must be from 0 to 999,999,999 inclusive. | | + + + + + + + + diff --git a/test/mocks/docs/github.com/solo-io/solo-kit/api/external/google/protobuf/wrappers.proto.sk.md b/test/mocks/docs/github.com/solo-io/solo-kit/api/external/google/protobuf/wrappers.proto.sk.md new file mode 100644 index 000000000..de47661f7 --- /dev/null +++ b/test/mocks/docs/github.com/solo-io/solo-kit/api/external/google/protobuf/wrappers.proto.sk.md @@ -0,0 +1,245 @@ + + +### Package: `google.protobuf` +Protocol Buffers - Google's data interchange format +Copyright 2008 Google Inc. All rights reserved. +https://developers.google.com/protocol-buffers/ + +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are +met: + + * Redistributions of source code must retain the above copyright +notice, this list of conditions and the following disclaimer. 
+ * Redistributions in binary form must reproduce the above +copyright notice, this list of conditions and the following disclaimer +in the documentation and/or other materials provided with the +distribution. + * Neither the name of Google Inc. nor the names of its +contributors may be used to endorse or promote products derived from +this software without specific prior written permission. + +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. +Wrappers for primitive (non-message) types. These types are useful +for embedding primitives in the `google.protobuf.Any` type and for places +where we need to distinguish between the absence of a primitive +typed field and its default value. + + + +#### Types: + + +- [DoubleValue](#DoubleValue) +- [FloatValue](#FloatValue) +- [Int64Value](#Int64Value) +- [UInt64Value](#UInt64Value) +- [Int32Value](#Int32Value) +- [UInt32Value](#UInt32Value) +- [BoolValue](#BoolValue) +- [StringValue](#StringValue) +- [BytesValue](#BytesValue) + + + + +##### Source File: [github.com/solo-io/solo-kit/api/external/google/protobuf/wrappers.proto](https://github.com/solo-io/solo-kit/blob/master/api/external/google/protobuf/wrappers.proto) + + + + + +--- +### DoubleValue + + +Wrapper message for `double`. 
+ +The JSON representation for `DoubleValue` is JSON number. + +```yaml +"value": float + +``` + +| Field | Type | Description | Default | +| ----- | ---- | ----------- |----------- | +| `value` | `float` | The double value. | | + + + + +--- +### FloatValue + + +Wrapper message for `float`. + +The JSON representation for `FloatValue` is JSON number. + +```yaml +"value": float + +``` + +| Field | Type | Description | Default | +| ----- | ---- | ----------- |----------- | +| `value` | `float` | The float value. | | + + + + +--- +### Int64Value + + +Wrapper message for `int64`. + +The JSON representation for `Int64Value` is JSON string. + +```yaml +"value": int + +``` + +| Field | Type | Description | Default | +| ----- | ---- | ----------- |----------- | +| `value` | `int` | The int64 value. | | + + + + +--- +### UInt64Value + + +Wrapper message for `uint64`. + +The JSON representation for `UInt64Value` is JSON string. + +```yaml +"value": int + +``` + +| Field | Type | Description | Default | +| ----- | ---- | ----------- |----------- | +| `value` | `int` | The uint64 value. | | + + + + +--- +### Int32Value + + +Wrapper message for `int32`. + +The JSON representation for `Int32Value` is JSON number. + +```yaml +"value": int + +``` + +| Field | Type | Description | Default | +| ----- | ---- | ----------- |----------- | +| `value` | `int` | The int32 value. | | + + + + +--- +### UInt32Value + + +Wrapper message for `uint32`. + +The JSON representation for `UInt32Value` is JSON number. + +```yaml +"value": int + +``` + +| Field | Type | Description | Default | +| ----- | ---- | ----------- |----------- | +| `value` | `int` | The uint32 value. | | + + + + +--- +### BoolValue + + +Wrapper message for `bool`. + +The JSON representation for `BoolValue` is JSON `true` and `false`. + +```yaml +"value": bool + +``` + +| Field | Type | Description | Default | +| ----- | ---- | ----------- |----------- | +| `value` | `bool` | The bool value. 
| | + + + + +--- +### StringValue + + +Wrapper message for `string`. + +The JSON representation for `StringValue` is JSON string. + +```yaml +"value": string + +``` + +| Field | Type | Description | Default | +| ----- | ---- | ----------- |----------- | +| `value` | `string` | The string value. | | + + + + +--- +### BytesValue + + +Wrapper message for `bytes`. + +The JSON representation for `BytesValue` is JSON string. + +```yaml +"value": bytes + +``` + +| Field | Type | Description | Default | +| ----- | ---- | ----------- |----------- | +| `value` | `bytes` | The bytes value. | | + + + + + + + + diff --git a/test/mocks/docs/github.com/solo-io/solo-kit/api/external/google/rpc/status.proto.sk.md b/test/mocks/docs/github.com/solo-io/solo-kit/api/external/google/rpc/status.proto.sk.md new file mode 100644 index 000000000..4e59958a4 --- /dev/null +++ b/test/mocks/docs/github.com/solo-io/solo-kit/api/external/google/rpc/status.proto.sk.md @@ -0,0 +1,110 @@ + + +### Package: `google.rpc` +Copyright 2017 Google Inc. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. + + + +#### Types: + + +- [Status](#Status) + + + + +##### Source File: [github.com/solo-io/solo-kit/api/external/google/rpc/status.proto](https://github.com/solo-io/solo-kit/blob/master/api/external/google/rpc/status.proto) + + + + + +--- +### Status + + +The `Status` type defines a logical error model that is suitable for different +programming environments, including REST APIs and RPC APIs. 
It is used by +[gRPC](https://github.com/grpc). The error model is designed to be: + +- Simple to use and understand for most users +- Flexible enough to meet unexpected needs + +# Overview + +The `Status` message contains three pieces of data: error code, error message, +and error details. The error code should be an enum value of +[google.rpc.Code][google.rpc.Code], but it may accept additional error codes if needed. The +error message should be a developer-facing English message that helps +developers *understand* and *resolve* the error. If a localized user-facing +error message is needed, put the localized message in the error details or +localize it in the client. The optional error details may contain arbitrary +information about the error. There is a predefined set of error detail types +in the package `google.rpc` that can be used for common error conditions. + +# Language mapping + +The `Status` message is the logical representation of the error model, but it +is not necessarily the actual wire format. When the `Status` message is +exposed in different client libraries and different wire protocols, it can be +mapped differently. For example, it will likely be mapped to some exceptions +in Java, but more likely mapped to some error codes in C. + +# Other uses + +The error model and the `Status` message can be used in a variety of +environments, either with or without APIs, to provide a +consistent developer experience across different environments. + +Example uses of this error model include: + +- Partial errors. If a service needs to return partial errors to the client, + it may embed the `Status` in the normal response to indicate the partial + errors. + +- Workflow errors. A typical workflow has multiple steps. Each step may + have a `Status` message for error reporting. + +- Batch operations. If a client uses batch request and batch response, the + `Status` message should be used directly inside batch response, one for + each error sub-response. 
+ +- Asynchronous operations. If an API call embeds asynchronous operation + results in its response, the status of those operations should be + represented directly using the `Status` message. + +- Logging. If some API errors are stored in logs, the message `Status` could + be used directly after any stripping needed for security/privacy reasons. + +```yaml +"code": int +"message": string +"details": []google.protobuf.Any + +``` + +| Field | Type | Description | Default | +| ----- | ---- | ----------- |----------- | +| `code` | `int` | The status code, which should be an enum value of [google.rpc.Code][google.rpc.Code]. | | +| `message` | `string` | A developer-facing error message, which should be in English. Any user-facing error message should be localized and sent in the [google.rpc.Status.details][google.rpc.Status.details] field, or localized by the client. | | +| `details` | [[]google.protobuf.Any](../protobuf/any.proto.sk.md#Any) | A list of messages that carry the error details. There is a common set of message types for APIs to use. | | + + + + + + + + diff --git a/test/mocks/docs/github.com/solo-io/solo-kit/api/external/metrics.proto.sk.md b/test/mocks/docs/github.com/solo-io/solo-kit/api/external/metrics.proto.sk.md new file mode 100644 index 000000000..87d3a1a9c --- /dev/null +++ b/test/mocks/docs/github.com/solo-io/solo-kit/api/external/metrics.proto.sk.md @@ -0,0 +1,266 @@ + + +### Package: `io.prometheus.client` +Copyright 2013 Prometheus Team +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + +http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+See the License for the specific language governing permissions and +limitations under the License. + + + +#### Types: + + +- [LabelPair](#LabelPair) +- [Gauge](#Gauge) +- [Counter](#Counter) +- [Quantile](#Quantile) +- [Summary](#Summary) +- [Untyped](#Untyped) +- [Histogram](#Histogram) +- [Bucket](#Bucket) +- [Metric](#Metric) +- [MetricFamily](#MetricFamily) + + + + +##### Enums: + + + - [MetricType](#MetricType) + + + +##### Source File: [github.com/solo-io/solo-kit/api/external/metrics.proto](https://github.com/solo-io/solo-kit/blob/master/api/external/metrics.proto) + + + + + +--- +### LabelPair + + + +```yaml +"name": string +"value": string + +``` + +| Field | Type | Description | Default | +| ----- | ---- | ----------- |----------- | +| `name` | `string` | | | +| `value` | `string` | | | + + + + +--- +### Gauge + + + +```yaml +"value": float + +``` + +| Field | Type | Description | Default | +| ----- | ---- | ----------- |----------- | +| `value` | `float` | | | + + + + +--- +### Counter + + + +```yaml +"value": float + +``` + +| Field | Type | Description | Default | +| ----- | ---- | ----------- |----------- | +| `value` | `float` | | | + + + + +--- +### Quantile + + + +```yaml +"quantile": float +"value": float + +``` + +| Field | Type | Description | Default | +| ----- | ---- | ----------- |----------- | +| `quantile` | `float` | | | +| `value` | `float` | | | + + + + +--- +### Summary + + + +```yaml +"sampleCount": int +"sampleSum": float +"quantile": []io.prometheus.client.Quantile + +``` + +| Field | Type | Description | Default | +| ----- | ---- | ----------- |----------- | +| `sampleCount` | `int` | | | +| `sampleSum` | `float` | | | +| `quantile` | [[]io.prometheus.client.Quantile](metrics.proto.sk.md#Quantile) | | | + + + + +--- +### Untyped + + + +```yaml +"value": float + +``` + +| Field | Type | Description | Default | +| ----- | ---- | ----------- |----------- | +| `value` | `float` | | | + + + + +--- +### Histogram + + + +```yaml 
+"sampleCount": int +"sampleSum": float +"bucket": []io.prometheus.client.Bucket + +``` + +| Field | Type | Description | Default | +| ----- | ---- | ----------- |----------- | +| `sampleCount` | `int` | | | +| `sampleSum` | `float` | | | +| `bucket` | [[]io.prometheus.client.Bucket](metrics.proto.sk.md#Bucket) | | | + + + + +--- +### Bucket + + + +```yaml +"cumulativeCount": int +"upperBound": float + +``` + +| Field | Type | Description | Default | +| ----- | ---- | ----------- |----------- | +| `cumulativeCount` | `int` | | | +| `upperBound` | `float` | | | + + + + +--- +### Metric + + + +```yaml +"label": []io.prometheus.client.LabelPair +"gauge": .io.prometheus.client.Gauge +"counter": .io.prometheus.client.Counter +"summary": .io.prometheus.client.Summary +"untyped": .io.prometheus.client.Untyped +"histogram": .io.prometheus.client.Histogram +"timestampMs": int + +``` + +| Field | Type | Description | Default | +| ----- | ---- | ----------- |----------- | +| `label` | [[]io.prometheus.client.LabelPair](metrics.proto.sk.md#LabelPair) | | | +| `gauge` | [.io.prometheus.client.Gauge](metrics.proto.sk.md#Gauge) | | | +| `counter` | [.io.prometheus.client.Counter](metrics.proto.sk.md#Counter) | | | +| `summary` | [.io.prometheus.client.Summary](metrics.proto.sk.md#Summary) | | | +| `untyped` | [.io.prometheus.client.Untyped](metrics.proto.sk.md#Untyped) | | | +| `histogram` | [.io.prometheus.client.Histogram](metrics.proto.sk.md#Histogram) | | | +| `timestampMs` | `int` | | | + + + + +--- +### MetricFamily + + + +```yaml +"name": string +"help": string +"type": .io.prometheus.client.MetricType +"metric": []io.prometheus.client.Metric + +``` + +| Field | Type | Description | Default | +| ----- | ---- | ----------- |----------- | +| `name` | `string` | | | +| `help` | `string` | | | +| `type` | [.io.prometheus.client.MetricType](metrics.proto.sk.md#MetricType) | | | +| `metric` | [[]io.prometheus.client.Metric](metrics.proto.sk.md#Metric) | | | + + + + +### 
MetricType + +Description: + +| Name | Description | +| ----- | ----------- | +| COUNTER | | +| GAUGE | | +| SUMMARY | | +| UNTYPED | | +| HISTOGRAM | | + + + + + diff --git a/test/mocks/docs/github.com/solo-io/solo-kit/api/external/trace.proto.sk.md b/test/mocks/docs/github.com/solo-io/solo-kit/api/external/trace.proto.sk.md new file mode 100644 index 000000000..63b55919a --- /dev/null +++ b/test/mocks/docs/github.com/solo-io/solo-kit/api/external/trace.proto.sk.md @@ -0,0 +1,467 @@ + + +### Package: `opencensus.proto.trace` +Copyright 2017, OpenCensus Authors + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. + + + +#### Types: + + +- [Span](#Span) +- [Attributes](#Attributes) +- [TimeEvent](#TimeEvent) +- [Annotation](#Annotation) +- [MessageEvent](#MessageEvent) +- [Type](#Type) +- [TimeEvents](#TimeEvents) +- [Link](#Link) +- [Type](#Type) +- [Links](#Links) +- [SpanKind](#SpanKind) +- [Status](#Status) +- [AttributeValue](#AttributeValue) +- [StackTrace](#StackTrace) +- [StackFrame](#StackFrame) +- [StackFrames](#StackFrames) +- [Module](#Module) +- [TruncatableString](#TruncatableString) + + + + +##### Source File: [github.com/solo-io/solo-kit/api/external/trace.proto](https://github.com/solo-io/solo-kit/blob/master/api/external/trace.proto) + + + + + +--- +### Span + + +A span represents a single operation within a trace. Spans can be +nested to form a trace tree. 
Often, a trace contains a root span +that describes the end-to-end latency, and one or more subspans for +its sub-operations. A trace can also contain multiple root spans, +or none at all. Spans do not need to be contiguous - there may be +gaps or overlaps between spans in a trace. + +The next id is 15. +TODO(bdrutu): Add an example. + +```yaml +"traceId": bytes +"spanId": bytes +"parentSpanId": bytes +"name": .opencensus.proto.trace.TruncatableString +"kind": .opencensus.proto.trace.Span.SpanKind +"startTime": .google.protobuf.Timestamp +"endTime": .google.protobuf.Timestamp +"attributes": .opencensus.proto.trace.Span.Attributes +"stackTrace": .opencensus.proto.trace.StackTrace +"timeEvents": .opencensus.proto.trace.Span.TimeEvents +"links": .opencensus.proto.trace.Span.Links +"status": .opencensus.proto.trace.Status +"sameProcessAsParentSpan": .google.protobuf.BoolValue +"childSpanCount": .google.protobuf.UInt32Value + +``` + +| Field | Type | Description | Default | +| ----- | ---- | ----------- |----------- | +| `traceId` | `bytes` | A unique identifier for a trace. All spans from the same trace share the same `trace_id`. The ID is a 16-byte array. This field is required. | | +| `spanId` | `bytes` | A unique identifier for a span within a trace, assigned when the span is created. The ID is an 8-byte array. This field is required. | | +| `parentSpanId` | `bytes` | The `span_id` of this span's parent span. If this is a root span, then this field must be empty. The ID is an 8-byte array. | | +| `name` | [.opencensus.proto.trace.TruncatableString](trace.proto.sk.md#TruncatableString) | A description of the span's operation. For example, the name can be a qualified method name or a file name and a line number where the operation is called. A best practice is to use the same display name at the same call point in an application. This makes it easier to correlate spans in different traces. This field is required. 
| | +| `kind` | [.opencensus.proto.trace.Span.SpanKind](trace.proto.sk.md#SpanKind) | Distinguishes between spans generated in a particular context. For example, two spans with the same name may be distinguished using `CLIENT` and `SERVER` to identify queueing latency associated with the span. | | +| `startTime` | [.google.protobuf.Timestamp](https://developers.google.com/protocol-buffers/docs/reference/csharp/class/google/protobuf/well-known-types/timestamp) | The start time of the span. On the client side, this is the time kept by the local machine where the span execution starts. On the server side, this is the time when the server's application handler starts running. | | +| `endTime` | [.google.protobuf.Timestamp](https://developers.google.com/protocol-buffers/docs/reference/csharp/class/google/protobuf/well-known-types/timestamp) | The end time of the span. On the client side, this is the time kept by the local machine where the span execution ends. On the server side, this is the time when the server application handler stops running. | | +| `attributes` | [.opencensus.proto.trace.Span.Attributes](trace.proto.sk.md#Attributes) | A set of attributes on the span. | | +| `stackTrace` | [.opencensus.proto.trace.StackTrace](trace.proto.sk.md#StackTrace) | A stack trace captured at the start of the span. | | +| `timeEvents` | [.opencensus.proto.trace.Span.TimeEvents](trace.proto.sk.md#TimeEvents) | The included time events. | | +| `links` | [.opencensus.proto.trace.Span.Links](trace.proto.sk.md#Links) | The included links. | | +| `status` | [.opencensus.proto.trace.Status](trace.proto.sk.md#Status) | An optional final status for this span. | | +| `sameProcessAsParentSpan` | [.google.protobuf.BoolValue](https://developers.google.com/protocol-buffers/docs/reference/csharp/class/google/protobuf/well-known-types/bool-value) | A highly recommended but not required flag that identifies when a trace crosses a process boundary.
True when the parent_span belongs to the same process as the current span. | | +| `childSpanCount` | [.google.protobuf.UInt32Value](https://developers.google.com/protocol-buffers/docs/reference/csharp/class/google/protobuf/well-known-types/u-int-32-value) | An optional number of child spans that were generated while this span was active. If set, allows an implementation to detect missing child spans. | | + + + + +--- +### Attributes + + +A set of attributes, each with a key and a value. + +```yaml +"attributeMap": map +"droppedAttributesCount": int + +``` + +| Field | Type | Description | Default | +| ----- | ---- | ----------- |----------- | +| `attributeMap` | `map` | The set of attributes. The value can be a string, an integer, or the Boolean values `true` and `false`. For example: "/instance_id": "my-instance" "/http/user_agent": "" "/http/server_latency": 300 "abc.com/myattribute": true | | +| `droppedAttributesCount` | `int` | The number of attributes that were discarded. Attributes can be discarded because their keys are too long or because there are too many attributes. If this value is 0, then no attributes were dropped. | | + + + + +--- +### TimeEvent + + +A time-stamped annotation or message event in the Span. + +```yaml +"time": .google.protobuf.Timestamp +"annotation": .opencensus.proto.trace.Span.TimeEvent.Annotation +"messageEvent": .opencensus.proto.trace.Span.TimeEvent.MessageEvent + +``` + +| Field | Type | Description | Default | +| ----- | ---- | ----------- |----------- | +| `time` | [.google.protobuf.Timestamp](https://developers.google.com/protocol-buffers/docs/reference/csharp/class/google/protobuf/well-known-types/timestamp) | The time the event occurred. | | +| `annotation` | [.opencensus.proto.trace.Span.TimeEvent.Annotation](trace.proto.sk.md#Annotation) | A text annotation with a set of attributes. 
| | +| `messageEvent` | [.opencensus.proto.trace.Span.TimeEvent.MessageEvent](trace.proto.sk.md#MessageEvent) | An event describing a message sent/received between Spans. | | + + + + +--- +### Annotation + + +A text annotation with a set of attributes. + +```yaml +"description": .opencensus.proto.trace.TruncatableString +"attributes": .opencensus.proto.trace.Span.Attributes + +``` + +| Field | Type | Description | Default | +| ----- | ---- | ----------- |----------- | +| `description` | [.opencensus.proto.trace.TruncatableString](trace.proto.sk.md#TruncatableString) | A user-supplied message describing the event. | | +| `attributes` | [.opencensus.proto.trace.Span.Attributes](trace.proto.sk.md#Attributes) | A set of attributes on the annotation. | | + + + + +--- +### MessageEvent + + +An event describing a message sent/received between Spans. + +```yaml +"type": .opencensus.proto.trace.Span.TimeEvent.MessageEvent.Type +"id": int +"uncompressedSize": int +"compressedSize": int + +``` + +| Field | Type | Description | Default | +| ----- | ---- | ----------- |----------- | +| `type` | [.opencensus.proto.trace.Span.TimeEvent.MessageEvent.Type](trace.proto.sk.md#Type) | The type of MessageEvent. Indicates whether the message was sent or received. | | +| `id` | `int` | An identifier for the MessageEvent's message that can be used to match SENT and RECEIVED MessageEvents. For example, this field could represent a sequence ID for a streaming RPC. It is recommended to be unique within a Span. | | +| `uncompressedSize` | `int` | The number of uncompressed bytes sent or received. | | +| `compressedSize` | `int` | The number of compressed bytes sent or received. If zero, assumed to be the same size as uncompressed. | | + + + + +--- +### Type + + +Indicates whether the message was sent or received. + +| Name | Description | +| ----- | ----------- | +| `TYPE_UNSPECIFIED` | Unknown event type. | +| `SENT` | Indicates a sent message. | +| `RECEIVED` | Indicates a received message. 
| + + + + +--- +### TimeEvents + + +A collection of `TimeEvent`s. A `TimeEvent` is a time-stamped annotation +on the span, consisting of either user-supplied key-value pairs, or +details of a message sent/received between Spans. + +```yaml +"timeEvent": []opencensus.proto.trace.Span.TimeEvent +"droppedAnnotationsCount": int +"droppedMessageEventsCount": int + +``` + +| Field | Type | Description | Default | +| ----- | ---- | ----------- |----------- | +| `timeEvent` | [[]opencensus.proto.trace.Span.TimeEvent](trace.proto.sk.md#TimeEvent) | A collection of `TimeEvent`s. | | +| `droppedAnnotationsCount` | `int` | The number of dropped annotations in all the included time events. If the value is 0, then no annotations were dropped. | | +| `droppedMessageEventsCount` | `int` | The number of dropped message events in all the included time events. If the value is 0, then no message events were dropped. | | + + + + +--- +### Link + + +A pointer from the current span to another span in the same trace or in a +different trace. For example, this can be used in batching operations, +where a single batch handler processes multiple requests from different +traces or when the handler receives a request from a different project. + +```yaml +"traceId": bytes +"spanId": bytes +"type": .opencensus.proto.trace.Span.Link.Type +"attributes": .opencensus.proto.trace.Span.Attributes + +``` + +| Field | Type | Description | Default | +| ----- | ---- | ----------- |----------- | +| `traceId` | `bytes` | A unique identifier for a trace. All spans from the same trace share the same `trace_id`. The ID is a 16-byte array. | | +| `spanId` | `bytes` | A unique identifier for a span within a trace, assigned when the span is created. The ID is an 8-byte array. | | +| `type` | [.opencensus.proto.trace.Span.Link.Type](trace.proto.sk.md#Type) | The relationship of the current span relative to the linked span. 
| | +| `attributes` | [.opencensus.proto.trace.Span.Attributes](trace.proto.sk.md#Attributes) | A set of attributes on the link. | | + + + + +--- +### Type + + +The relationship of the current span relative to the linked span: child, +parent, or unspecified. + +| Name | Description | +| ----- | ----------- | +| `TYPE_UNSPECIFIED` | The relationship of the two spans is unknown, or known but other than parent-child. | +| `CHILD_LINKED_SPAN` | The linked span is a child of the current span. | +| `PARENT_LINKED_SPAN` | The linked span is a parent of the current span. | + + + + +--- +### Links + + +A collection of links, which are references from this span to a span +in the same or different trace. + +```yaml +"link": []opencensus.proto.trace.Span.Link +"droppedLinksCount": int + +``` + +| Field | Type | Description | Default | +| ----- | ---- | ----------- |----------- | +| `link` | [[]opencensus.proto.trace.Span.Link](trace.proto.sk.md#Link) | A collection of links. | | +| `droppedLinksCount` | `int` | The number of dropped links after the maximum size was enforced. If this value is 0, then no links were dropped. | | + + + + +--- +### SpanKind + + +Type of span. Can be used to specify additional relationships between spans +in addition to a parent/child relationship. + +| Name | Description | +| ----- | ----------- | +| `SPAN_KIND_UNSPECIFIED` | Unspecified. | +| `SERVER` | Indicates that the span covers server-side handling of an RPC or other remote network request. | +| `CLIENT` | Indicates that the span covers the client-side wrapper around an RPC or other remote request. | + + + + +--- +### Status + + +The `Status` type defines a logical error model that is suitable for different +programming environments, including REST APIs and RPC APIs. This proto's fields +are a subset of those of +[google.rpc.Status](https://github.com/googleapis/googleapis/blob/master/google/rpc/status.proto), +which is used by [gRPC](https://github.com/grpc). 
+ +```yaml +"code": int +"message": string + +``` + +| Field | Type | Description | Default | +| ----- | ---- | ----------- |----------- | +| `code` | `int` | The status code. | | +| `message` | `string` | A developer-facing error message, which should be in English. | | + + + + +--- +### AttributeValue + + +The value of an Attribute. + +```yaml +"stringValue": .opencensus.proto.trace.TruncatableString +"intValue": int +"boolValue": bool + +``` + +| Field | Type | Description | Default | +| ----- | ---- | ----------- |----------- | +| `stringValue` | [.opencensus.proto.trace.TruncatableString](trace.proto.sk.md#TruncatableString) | A string up to 256 bytes long. | | +| `intValue` | `int` | A 64-bit signed integer. | | +| `boolValue` | `bool` | A Boolean value represented by `true` or `false`. | | + + + + +--- +### StackTrace + + +The call stack which originated this span. + +```yaml +"stackFrames": .opencensus.proto.trace.StackTrace.StackFrames +"stackTraceHashId": int + +``` + +| Field | Type | Description | Default | +| ----- | ---- | ----------- |----------- | +| `stackFrames` | [.opencensus.proto.trace.StackTrace.StackFrames](trace.proto.sk.md#StackFrames) | Stack frames in this stack trace. | | +| `stackTraceHashId` | `int` | The hash ID is used to conserve network bandwidth for duplicate stack traces within a single trace. Often multiple spans will have identical stack traces. The first occurrence of a stack trace should contain both `stack_frames` and a value in `stack_trace_hash_id`. Subsequent spans within the same request can refer to that stack trace by setting only `stack_trace_hash_id`. TODO: describe how to deal with the case where stack_trace_hash_id is zero because it was not set. | | + + + + +--- +### StackFrame + + +A single stack frame in a stack trace. 
+ +```yaml +"functionName": .opencensus.proto.trace.TruncatableString +"originalFunctionName": .opencensus.proto.trace.TruncatableString +"fileName": .opencensus.proto.trace.TruncatableString +"lineNumber": int +"columnNumber": int +"loadModule": .opencensus.proto.trace.Module +"sourceVersion": .opencensus.proto.trace.TruncatableString + +``` + +| Field | Type | Description | Default | +| ----- | ---- | ----------- |----------- | +| `functionName` | [.opencensus.proto.trace.TruncatableString](trace.proto.sk.md#TruncatableString) | The fully-qualified name that uniquely identifies the function or method that is active in this frame. | | +| `originalFunctionName` | [.opencensus.proto.trace.TruncatableString](trace.proto.sk.md#TruncatableString) | An un-mangled function name, if `function_name` is [mangled](http://www.avabodh.com/cxxin/namemangling.html). The name can be fully qualified. | | +| `fileName` | [.opencensus.proto.trace.TruncatableString](trace.proto.sk.md#TruncatableString) | The name of the source file where the function call appears. | | +| `lineNumber` | `int` | The line number in `file_name` where the function call appears. | | +| `columnNumber` | `int` | The column number where the function call appears, if available. This is important in JavaScript because of its anonymous functions. | | +| `loadModule` | [.opencensus.proto.trace.Module](trace.proto.sk.md#Module) | The binary module from where the code was loaded. | | +| `sourceVersion` | [.opencensus.proto.trace.TruncatableString](trace.proto.sk.md#TruncatableString) | The version of the deployed source code. | | + + + + +--- +### StackFrames + + +A collection of stack frames, which can be truncated. 
+ +```yaml +"frame": []opencensus.proto.trace.StackTrace.StackFrame +"droppedFramesCount": int + +``` + +| Field | Type | Description | Default | +| ----- | ---- | ----------- |----------- | +| `frame` | [[]opencensus.proto.trace.StackTrace.StackFrame](trace.proto.sk.md#StackFrame) | Stack frames in this call stack. | | +| `droppedFramesCount` | `int` | The number of stack frames that were dropped because there were too many stack frames. If this value is 0, then no stack frames were dropped. | | + + + + +--- +### Module + + +A description of a binary module. + +```yaml +"module": .opencensus.proto.trace.TruncatableString +"buildId": .opencensus.proto.trace.TruncatableString + +``` + +| Field | Type | Description | Default | +| ----- | ---- | ----------- |----------- | +| `module` | [.opencensus.proto.trace.TruncatableString](trace.proto.sk.md#TruncatableString) | TODO: document the meaning of this field. For example: main binary, kernel modules, and dynamic libraries such as libc.so, sharedlib.so. | | +| `buildId` | [.opencensus.proto.trace.TruncatableString](trace.proto.sk.md#TruncatableString) | A unique identifier for the module, usually a hash of its contents. | | + + + + +--- +### TruncatableString + + +A string that might be shortened to a specified length. + +```yaml +"value": string +"truncatedByteCount": int + +``` + +| Field | Type | Description | Default | +| ----- | ---- | ----------- |----------- | +| `value` | `string` | The shortened string. For example, if the original string was 500 bytes long and the limit of the string was 128 bytes, then this value contains the first 128 bytes of the 500-byte string. Note that truncation always happens on a character boundary, to ensure that a truncated string is still valid UTF-8. Because it may contain multi-byte characters, the size of the truncated string may be less than the truncation limit. | | +| `truncatedByteCount` | `int` | The number of bytes removed from the original string. 
If this value is 0, then the string was not shortened. | | + + + + + + + + diff --git a/test/mocks/docs/github.com/solo-io/solo-kit/api/v1/metadata.proto.sk.md b/test/mocks/docs/github.com/solo-io/solo-kit/api/v1/metadata.proto.sk.md new file mode 100644 index 000000000..547dba1ea --- /dev/null +++ b/test/mocks/docs/github.com/solo-io/solo-kit/api/v1/metadata.proto.sk.md @@ -0,0 +1,50 @@ + + +### Package: `core.solo.io` +#### Types: + + +- [Metadata](#Metadata) + + + + +##### Source File: [github.com/solo-io/solo-kit/api/v1/metadata.proto](https://github.com/solo-io/solo-kit/blob/master/api/v1/metadata.proto) + + + + + +--- +### Metadata + + +* +Metadata contains general properties of resources for purposes of versioning, annotating, and namespacing. + +```yaml +"name": string +"namespace": string +"cluster": string +"resourceVersion": string +"labels": map +"annotations": map + +``` + +| Field | Type | Description | Default | +| ----- | ---- | ----------- |----------- | +| `name` | `string` | Name of the resource. Names must be unique and follow the following syntax rules: One or more lowercase rfc1035/rfc1123 labels separated by '.' with a maximum length of 253 characters. | | +| `namespace` | `string` | Namespace is used for the namespacing of resources. | | +| `cluster` | `string` | Cluster indicates the cluster this resource belongs to Cluster is only applicable in certain contexts, e.g. Kubernetes An empty string here refers to the local cluster | | +| `resourceVersion` | `string` | An opaque value that represents the internal version of this object that can be used by clients to determine when objects have changed. | | +| `labels` | `map` | Map of string keys and values that can be used to organize and categorize (scope and select) objects. 
Some resources contain `selectors` which can be linked with other resources by their labels | | +| `annotations` | `map` | Annotations is an unstructured key value map stored with a resource that may be set by external tools to store and retrieve arbitrary metadata. | | + + + + + + + + diff --git a/test/mocks/docs/github.com/solo-io/solo-kit/api/v1/ref.proto.sk.md b/test/mocks/docs/github.com/solo-io/solo-kit/api/v1/ref.proto.sk.md new file mode 100644 index 000000000..2324d2c15 --- /dev/null +++ b/test/mocks/docs/github.com/solo-io/solo-kit/api/v1/ref.proto.sk.md @@ -0,0 +1,42 @@ + + +### Package: `core.solo.io` +#### Types: + + +- [ResourceRef](#ResourceRef) + + + + +##### Source File: [github.com/solo-io/solo-kit/api/v1/ref.proto](https://github.com/solo-io/solo-kit/blob/master/api/v1/ref.proto) + + + + + +--- +### ResourceRef + + +A way to reference resources across namespaces +TODO(ilackarms): make upstreamname and secretref into ResourceRefs + +```yaml +"name": string +"namespace": string + +``` + +| Field | Type | Description | Default | +| ----- | ---- | ----------- |----------- | +| `name` | `string` | | | +| `namespace` | `string` | | | + + + + + + + + diff --git a/test/mocks/docs/github.com/solo-io/solo-kit/api/v1/solo-kit.proto.sk.md b/test/mocks/docs/github.com/solo-io/solo-kit/api/v1/solo-kit.proto.sk.md new file mode 100644 index 000000000..942289de7 --- /dev/null +++ b/test/mocks/docs/github.com/solo-io/solo-kit/api/v1/solo-kit.proto.sk.md @@ -0,0 +1,44 @@ + + +### Package: `core.solo.io` +#### Types: + + +- [Resource](#Resource) + + + + +##### Source File: [github.com/solo-io/solo-kit/api/v1/solo-kit.proto](https://github.com/solo-io/solo-kit/blob/master/api/v1/solo-kit.proto) + + + + + +--- +### Resource + + + +```yaml +"shortName": string +"pluralName": string +"clusterScoped": bool +"skipDocsGen": bool + +``` + +| Field | Type | Description | Default | +| ----- | ---- | ----------- |----------- | +| `shortName` | `string` | becomes the 
kubernetes short name for the generated crd | | +| `pluralName` | `string` | becomes the kubernetes plural name for the generated crd | | +| `clusterScoped` | `bool` | the resource lives at the cluster level, namespace is ignored by the server | | +| `skipDocsGen` | `bool` | indicates whether documentation generation has to be skipped for the given resource, defaults to false | | + + + + + + + + diff --git a/test/mocks/docs/github.com/solo-io/solo-kit/api/v1/status.proto.sk.md b/test/mocks/docs/github.com/solo-io/solo-kit/api/v1/status.proto.sk.md new file mode 100644 index 000000000..aaf41b2e5 --- /dev/null +++ b/test/mocks/docs/github.com/solo-io/solo-kit/api/v1/status.proto.sk.md @@ -0,0 +1,62 @@ + + +### Package: `core.solo.io` +#### Types: + + +- [Status](#Status) +- [State](#State) + + + + +##### Source File: [github.com/solo-io/solo-kit/api/v1/status.proto](https://github.com/solo-io/solo-kit/blob/master/api/v1/status.proto) + + + + + +--- +### Status + + +* +Status indicates whether a resource has been (in)validated by a reporter in the system. +Statuses are meant to be read-only by users + +```yaml +"state": .core.solo.io.Status.State +"reason": string +"reportedBy": string +"subresourceStatuses": map + +``` + +| Field | Type | Description | Default | +| ----- | ---- | ----------- |----------- | +| `state` | [.core.solo.io.Status.State](status.proto.sk.md#State) | State is the enum indicating the state of the resource | | +| `reason` | `string` | Reason is a description of the error for Rejected resources. 
If the resource is pending or accepted, this field will be empty | | +| `reportedBy` | `string` | Reference to the reporter who wrote this status | | +| `subresourceStatuses` | `map` | Reference to statuses (by resource-ref string: "Kind.Namespace.Name") of subresources of the parent resource | | + + + + +--- +### State + + + +| Name | Description | +| ----- | ----------- | +| `Pending` | Pending status indicates the resource has not yet been validated | +| `Accepted` | Accepted indicates the resource has been validated | +| `Rejected` | Rejected indicates an invalid configuration by the user Rejected resources may be propagated to the xDS server depending on their severity | + + + + + + + + diff --git a/test/mocks/docs/github.com/solo-io/solo-kit/pkg/api/v1/apiserver/api_server.proto.sk.md b/test/mocks/docs/github.com/solo-io/solo-kit/pkg/api/v1/apiserver/api_server.proto.sk.md new file mode 100644 index 000000000..06577862d --- /dev/null +++ b/test/mocks/docs/github.com/solo-io/solo-kit/pkg/api/v1/apiserver/api_server.proto.sk.md @@ -0,0 +1,247 @@ + + +### Package: `apiserver.api.v1` +#### Types: + + +- [ReadRequest](#ReadRequest) +- [ReadResponse](#ReadResponse) +- [WriteRequest](#WriteRequest) +- [WriteResponse](#WriteResponse) +- [DeleteRequest](#DeleteRequest) +- [DeleteResponse](#DeleteResponse) +- [ListRequest](#ListRequest) +- [ListResponse](#ListResponse) +- [WatchRequest](#WatchRequest) +- [WatchResponse](#WatchResponse) +- [RegisterRequest](#RegisterRequest) +- [RegisterResponse](#RegisterResponse) + + + + +##### Source File: [github.com/solo-io/solo-kit/pkg/api/v1/apiserver/api_server.proto](https://github.com/solo-io/solo-kit/blob/master/pkg/api/v1/apiserver/api_server.proto) + + + + + +--- +### ReadRequest + + +GRPC stuff + +```yaml +"name": string +"namespace": string +"typeUrl": string + +``` + +| Field | Type | Description | Default | +| ----- | ---- | ----------- |----------- | +| `name` | `string` | | | +| `namespace` | `string` | | | +| 
`typeUrl` | `string` | | | + + + + +--- +### ReadResponse + + + +```yaml +"resource": .google.protobuf.Any + +``` + +| Field | Type | Description | Default | +| ----- | ---- | ----------- |----------- | +| `resource` | [.google.protobuf.Any](https://developers.google.com/protocol-buffers/docs/reference/csharp/class/google/protobuf/well-known-types/any) | | | + + + + +--- +### WriteRequest + + + +```yaml +"resource": .google.protobuf.Any +"overwriteExisting": bool + +``` + +| Field | Type | Description | Default | +| ----- | ---- | ----------- |----------- | +| `resource` | [.google.protobuf.Any](https://developers.google.com/protocol-buffers/docs/reference/csharp/class/google/protobuf/well-known-types/any) | | | +| `overwriteExisting` | `bool` | | | + + + + +--- +### WriteResponse + + + +```yaml +"resource": .google.protobuf.Any + +``` + +| Field | Type | Description | Default | +| ----- | ---- | ----------- |----------- | +| `resource` | [.google.protobuf.Any](https://developers.google.com/protocol-buffers/docs/reference/csharp/class/google/protobuf/well-known-types/any) | | | + + + + +--- +### DeleteRequest + + + +```yaml +"name": string +"namespace": string +"typeUrl": string +"ignoreNotExist": bool + +``` + +| Field | Type | Description | Default | +| ----- | ---- | ----------- |----------- | +| `name` | `string` | | | +| `namespace` | `string` | | | +| `typeUrl` | `string` | | | +| `ignoreNotExist` | `bool` | | | + + + + +--- +### DeleteResponse + + + +```yaml + +``` + +| Field | Type | Description | Default | +| ----- | ---- | ----------- |----------- | + + + + +--- +### ListRequest + + + +```yaml +"namespace": string +"typeUrl": string + +``` + +| Field | Type | Description | Default | +| ----- | ---- | ----------- |----------- | +| `namespace` | `string` | | | +| `typeUrl` | `string` | | | + + + + +--- +### ListResponse + + + +```yaml +"resourceList": []google.protobuf.Any + +``` + +| Field | Type | Description | Default | +| ----- | ---- | ----------- 
|----------- | +| `resourceList` | [[]google.protobuf.Any](../../../../api/external/google/protobuf/any.proto.sk.md#Any) | | | + + + + +--- +### WatchRequest + + + +```yaml +"namespace": string +"typeUrl": string + +``` + +| Field | Type | Description | Default | +| ----- | ---- | ----------- |----------- | +| `namespace` | `string` | | | +| `typeUrl` | `string` | | | + + + + +--- +### WatchResponse + + + +```yaml +"resourceList": []google.protobuf.Any + +``` + +| Field | Type | Description | Default | +| ----- | ---- | ----------- |----------- | +| `resourceList` | [[]google.protobuf.Any](../../../../api/external/google/protobuf/any.proto.sk.md#Any) | | | + + + + +--- +### RegisterRequest + + + +```yaml + +``` + +| Field | Type | Description | Default | +| ----- | ---- | ----------- |----------- | + + + + +--- +### RegisterResponse + + + +```yaml + +``` + +| Field | Type | Description | Default | +| ----- | ---- | ----------- |----------- | + + + + + + + + diff --git a/test/mocks/docs/github.com/solo-io/solo-kit/test/mocks/api/v1/mock_resources.proto.sk.md b/test/mocks/docs/github.com/solo-io/solo-kit/test/mocks/api/v1/mock_resources.proto.sk.md new file mode 100644 index 000000000..adf957d03 --- /dev/null +++ b/test/mocks/docs/github.com/solo-io/solo-kit/test/mocks/api/v1/mock_resources.proto.sk.md @@ -0,0 +1,95 @@ + + +### Package: `testing.solo.io` +Syntax Comments +Syntax Comments a + + + +#### Types: + + +- [MockResource](#MockResource) **Top-Level Resource** +- [FakeResource](#FakeResource) **Top-Level Resource** +- [MockXdsResourceConfig](#MockXdsResourceConfig) + + + + +##### Source File: [github.com/solo-io/solo-kit/test/mocks/api/v1/mock_resources.proto](https://github.com/solo-io/solo-kit/blob/master/test/mocks/api/v1/mock_resources.proto) + + + + + +--- +### MockResource + + +Mock resources for goofin off + +```yaml +"status": .core.solo.io.Status +"metadata": .core.solo.io.Metadata +"data": string +"someDumbField": string +"oneofOne": string 
+"oneofTwo": bool + +``` + +| Field | Type | Description | Default | +| ----- | ---- | ----------- |----------- | +| `status` | [.core.solo.io.Status](../../../../api/v1/status.proto.sk.md#Status) | | | +| `metadata` | [.core.solo.io.Metadata](../../../../api/v1/metadata.proto.sk.md#Metadata) | | | +| `data` | `string` | | | +| `someDumbField` | `string` | | | +| `oneofOne` | `string` | | | +| `oneofTwo` | `bool` | | | + + + + +--- +### FakeResource + + + +```yaml +"count": int +"metadata": .core.solo.io.Metadata +"status": .core.solo.io.Status + +``` + +| Field | Type | Description | Default | +| ----- | ---- | ----------- |----------- | +| `count` | `int` | | | +| `metadata` | [.core.solo.io.Metadata](../../../../api/v1/metadata.proto.sk.md#Metadata) | | | +| `status` | [.core.solo.io.Status](../../../../api/v1/status.proto.sk.md#Status) | | | + + + + +--- +### MockXdsResourceConfig + + + + +```yaml +"domain": string + +``` + +| Field | Type | Description | Default | +| ----- | ---- | ----------- |----------- | +| `domain` | `string` | | | + + + + + + + + diff --git a/test/mocks/docs/github.com/solo-io/solo-kit/test/mocks/api/v1/more_mock_resources.proto.sk.md b/test/mocks/docs/github.com/solo-io/solo-kit/test/mocks/api/v1/more_mock_resources.proto.sk.md new file mode 100644 index 000000000..328c22284 --- /dev/null +++ b/test/mocks/docs/github.com/solo-io/solo-kit/test/mocks/api/v1/more_mock_resources.proto.sk.md @@ -0,0 +1,65 @@ + + +### Package: `testing.solo.io` +#### Types: + + +- [AnotherMockResource](#AnotherMockResource) **Top-Level Resource** +- [ClusterResource](#ClusterResource) **Top-Level Resource** + + + + +##### Source File: [github.com/solo-io/solo-kit/test/mocks/api/v1/more_mock_resources.proto](https://github.com/solo-io/solo-kit/blob/master/test/mocks/api/v1/more_mock_resources.proto) + + + + + +--- +### AnotherMockResource + + +Description of the AnotherMockResource + +```yaml +"metadata": .core.solo.io.Metadata +"status": .core.solo.io.Status 
+"basicField": string + +``` + +| Field | Type | Description | Default | +| ----- | ---- | ----------- |----------- | +| `metadata` | [.core.solo.io.Metadata](../../../../api/v1/metadata.proto.sk.md#Metadata) | | | +| `status` | [.core.solo.io.Status](../../../../api/v1/status.proto.sk.md#Status) | | | +| `basicField` | `string` | comments that go above the basic field in our docs | | + + + + +--- +### ClusterResource + + + +```yaml +"metadata": .core.solo.io.Metadata +"status": .core.solo.io.Status +"basicField": string + +``` + +| Field | Type | Description | Default | +| ----- | ---- | ----------- |----------- | +| `metadata` | [.core.solo.io.Metadata](../../../../api/v1/metadata.proto.sk.md#Metadata) | | | +| `status` | [.core.solo.io.Status](../../../../api/v1/status.proto.sk.md#Status) | | | +| `basicField` | `string` | comments that go above the basic field in our docs | | + + + + + + + + diff --git a/test/mocks/docs/github.com/solo-io/solo-kit/test/mocks/api/v1alpha1/mock_resources.proto.sk.md b/test/mocks/docs/github.com/solo-io/solo-kit/test/mocks/api/v1alpha1/mock_resources.proto.sk.md new file mode 100644 index 000000000..f984b0dd8 --- /dev/null +++ b/test/mocks/docs/github.com/solo-io/solo-kit/test/mocks/api/v1alpha1/mock_resources.proto.sk.md @@ -0,0 +1,74 @@ + + +### Package: `testing.solo.io` +Syntax Comments +Syntax Comments a + + + +#### Types: + + +- [MockResource](#MockResource) **Top-Level Resource** +- [FakeResource](#FakeResource) **Top-Level Resource** + + + + +##### Source File: [github.com/solo-io/solo-kit/test/mocks/api/v1alpha1/mock_resources.proto](https://github.com/solo-io/solo-kit/blob/master/test/mocks/api/v1alpha1/mock_resources.proto) + + + + + +--- +### MockResource + + +Mock resources for goofin off + +```yaml +"status": .core.solo.io.Status +"metadata": .core.solo.io.Metadata +"data": string +"someDumbField": string +"oneofOne": string +"oneofTwo": bool + +``` + +| Field | Type | Description | Default | +| ----- | ---- | 
----------- |----------- | +| `status` | [.core.solo.io.Status](../../../../api/v1/status.proto.sk.md#Status) | | | +| `metadata` | [.core.solo.io.Metadata](../../../../api/v1/metadata.proto.sk.md#Metadata) | | | +| `data` | `string` | | | +| `someDumbField` | `string` | | | +| `oneofOne` | `string` | | | +| `oneofTwo` | `bool` | | | + + + + +--- +### FakeResource + + + +```yaml +"count": int +"metadata": .core.solo.io.Metadata + +``` + +| Field | Type | Description | Default | +| ----- | ---- | ----------- |----------- | +| `count` | `int` | | | +| `metadata` | [.core.solo.io.Metadata](../../../../api/v1/metadata.proto.sk.md#Metadata) | | | + + + + + + + + diff --git a/test/mocks/docs/github.com/solo-io/solo-kit/test/mocks/api/v2alpha1/mock_resources.proto.sk.md b/test/mocks/docs/github.com/solo-io/solo-kit/test/mocks/api/v2alpha1/mock_resources.proto.sk.md new file mode 100644 index 000000000..49c214ffd --- /dev/null +++ b/test/mocks/docs/github.com/solo-io/solo-kit/test/mocks/api/v2alpha1/mock_resources.proto.sk.md @@ -0,0 +1,49 @@ + + +### Package: `testing.solo.io` +#### Types: + + +- [MockResource](#MockResource) **Top-Level Resource** + + + + +##### Source File: [github.com/solo-io/solo-kit/test/mocks/api/v2alpha1/mock_resources.proto](https://github.com/solo-io/solo-kit/blob/master/test/mocks/api/v2alpha1/mock_resources.proto) + + + + + +--- +### MockResource + + +The best mock resource you ever done seen + +```yaml +"status": .core.solo.io.Status +"metadata": .core.solo.io.Metadata +"someDumbField": string +"data": string +"oneofOne": string +"oneofTwo": bool + +``` + +| Field | Type | Description | Default | +| ----- | ---- | ----------- |----------- | +| `status` | [.core.solo.io.Status](../../../../api/v1/status.proto.sk.md#Status) | | | +| `metadata` | [.core.solo.io.Metadata](../../../../api/v1/metadata.proto.sk.md#Metadata) | | | +| `someDumbField` | `string` | | | +| `data` | `string` | | | +| `oneofOne` | `string` | | | +| `oneofTwo` | `bool` | | | 
+ + + + + + + + diff --git a/test/mocks/docs/google/protobuf/descriptor.proto.sk.md b/test/mocks/docs/google/protobuf/descriptor.proto.sk.md new file mode 100644 index 000000000..410c59cce --- /dev/null +++ b/test/mocks/docs/google/protobuf/descriptor.proto.sk.md @@ -0,0 +1,825 @@ + + +### Package: `google.protobuf` +Protocol Buffers - Google's data interchange format +Copyright 2008 Google Inc. All rights reserved. +https://developers.google.com/protocol-buffers/ + +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are +met: + + * Redistributions of source code must retain the above copyright +notice, this list of conditions and the following disclaimer. + * Redistributions in binary form must reproduce the above +copyright notice, this list of conditions and the following disclaimer +in the documentation and/or other materials provided with the +distribution. + * Neither the name of Google Inc. nor the names of its +contributors may be used to endorse or promote products derived from +this software without specific prior written permission. + +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
+Author: kenton@google.com (Kenton Varda) + Based on original Protocol Buffers design by + Sanjay Ghemawat, Jeff Dean, and others. + +The messages in this file describe the definitions found in .proto files. +A valid .proto file can be translated directly to a FileDescriptorProto +without any other information (e.g. without reading its imports). + + + +#### Types: + + +- [FileDescriptorSet](#FileDescriptorSet) +- [FileDescriptorProto](#FileDescriptorProto) +- [DescriptorProto](#DescriptorProto) +- [ExtensionRange](#ExtensionRange) +- [ReservedRange](#ReservedRange) +- [FieldDescriptorProto](#FieldDescriptorProto) +- [Type](#Type) +- [Label](#Label) +- [OneofDescriptorProto](#OneofDescriptorProto) +- [EnumDescriptorProto](#EnumDescriptorProto) +- [EnumValueDescriptorProto](#EnumValueDescriptorProto) +- [ServiceDescriptorProto](#ServiceDescriptorProto) +- [MethodDescriptorProto](#MethodDescriptorProto) +- [FileOptions](#FileOptions) +- [OptimizeMode](#OptimizeMode) +- [MessageOptions](#MessageOptions) +- [FieldOptions](#FieldOptions) +- [CType](#CType) +- [JSType](#JSType) +- [OneofOptions](#OneofOptions) +- [EnumOptions](#EnumOptions) +- [EnumValueOptions](#EnumValueOptions) +- [ServiceOptions](#ServiceOptions) +- [MethodOptions](#MethodOptions) +- [IdempotencyLevel](#IdempotencyLevel) +- [UninterpretedOption](#UninterpretedOption) +- [NamePart](#NamePart) +- [SourceCodeInfo](#SourceCodeInfo) +- [Location](#Location) +- [GeneratedCodeInfo](#GeneratedCodeInfo) +- [Annotation](#Annotation) + + + + +##### Source File: `google/protobuf/descriptor.proto` + + + + + +--- +### FileDescriptorSet + + +The protocol compiler can output a FileDescriptorSet containing the .proto +files it parses. 
+ +```yaml +"file": []google.protobuf.FileDescriptorProto + +``` + +| Field | Type | Description | Default | +| ----- | ---- | ----------- |----------- | +| `file` | [[]google.protobuf.FileDescriptorProto](descriptor.proto.sk.md#FileDescriptorProto) | | | + + + + +--- +### FileDescriptorProto + + +Describes a complete .proto file. + +```yaml +"name": string +"package": string +"dependency": []string +"publicDependency": []int +"weakDependency": []int +"messageType": []google.protobuf.DescriptorProto +"enumType": []google.protobuf.EnumDescriptorProto +"service": []google.protobuf.ServiceDescriptorProto +"extension": []google.protobuf.FieldDescriptorProto +"options": .google.protobuf.FileOptions +"sourceCodeInfo": .google.protobuf.SourceCodeInfo +"syntax": string + +``` + +| Field | Type | Description | Default | +| ----- | ---- | ----------- |----------- | +| `name` | `string` | | | +| `package` | `string` | | | +| `dependency` | `[]string` | Names of files imported by this file. | | +| `publicDependency` | `[]int` | Indexes of the public imported files in the dependency list above. | | +| `weakDependency` | `[]int` | Indexes of the weak imported files in the dependency list. For Google-internal migration only. Do not use. | | +| `messageType` | [[]google.protobuf.DescriptorProto](descriptor.proto.sk.md#DescriptorProto) | All top-level definitions in this file. 
| | +| `enumType` | [[]google.protobuf.EnumDescriptorProto](descriptor.proto.sk.md#EnumDescriptorProto) | | | +| `service` | [[]google.protobuf.ServiceDescriptorProto](descriptor.proto.sk.md#ServiceDescriptorProto) | | | +| `extension` | [[]google.protobuf.FieldDescriptorProto](descriptor.proto.sk.md#FieldDescriptorProto) | | | +| `options` | [.google.protobuf.FileOptions](https://developers.google.com/protocol-buffers/docs/reference/csharp/class/google/protobuf/well-known-types/file-options) | | | +| `sourceCodeInfo` | [.google.protobuf.SourceCodeInfo](https://developers.google.com/protocol-buffers/docs/reference/csharp/class/google/protobuf/well-known-types/source-code-info) | This field contains optional information about the original source code. You may safely remove this entire field without harming runtime functionality of the descriptors -- the information is needed only by development tools. | | +| `syntax` | `string` | The syntax of the proto file. The supported values are "proto2" and "proto3". | | + + + + +--- +### DescriptorProto + + +Describes a message type. 
+ +```yaml +"name": string +"field": []google.protobuf.FieldDescriptorProto +"extension": []google.protobuf.FieldDescriptorProto +"nestedType": []google.protobuf.DescriptorProto +"enumType": []google.protobuf.EnumDescriptorProto +"extensionRange": []google.protobuf.DescriptorProto.ExtensionRange +"oneofDecl": []google.protobuf.OneofDescriptorProto +"options": .google.protobuf.MessageOptions +"reservedRange": []google.protobuf.DescriptorProto.ReservedRange +"reservedName": []string + +``` + +| Field | Type | Description | Default | +| ----- | ---- | ----------- |----------- | +| `name` | `string` | | | +| `field` | [[]google.protobuf.FieldDescriptorProto](descriptor.proto.sk.md#FieldDescriptorProto) | | | +| `extension` | [[]google.protobuf.FieldDescriptorProto](descriptor.proto.sk.md#FieldDescriptorProto) | | | +| `nestedType` | [[]google.protobuf.DescriptorProto](descriptor.proto.sk.md#DescriptorProto) | | | +| `enumType` | [[]google.protobuf.EnumDescriptorProto](descriptor.proto.sk.md#EnumDescriptorProto) | | | +| `extensionRange` | [[]google.protobuf.DescriptorProto.ExtensionRange](descriptor.proto.sk.md#ExtensionRange) | | | +| `oneofDecl` | [[]google.protobuf.OneofDescriptorProto](descriptor.proto.sk.md#OneofDescriptorProto) | | | +| `options` | [.google.protobuf.MessageOptions](https://developers.google.com/protocol-buffers/docs/reference/csharp/class/google/protobuf/well-known-types/message-options) | | | +| `reservedRange` | [[]google.protobuf.DescriptorProto.ReservedRange](descriptor.proto.sk.md#ReservedRange) | | | +| `reservedName` | `[]string` | Reserved field names, which may not be used by fields in the same message. A given name may only be reserved once. | | + + + + +--- +### ExtensionRange + + + +```yaml +"start": int +"end": int + +``` + +| Field | Type | Description | Default | +| ----- | ---- | ----------- |----------- | +| `start` | `int` | | | +| `end` | `int` | | | + + + + +--- +### ReservedRange + + +Range of reserved tag numbers. 
Reserved tag numbers may not be used by +fields or extension ranges in the same message. Reserved ranges may +not overlap. + +```yaml +"start": int +"end": int + +``` + +| Field | Type | Description | Default | +| ----- | ---- | ----------- |----------- | +| `start` | `int` | | | +| `end` | `int` | | | + + + + +--- +### FieldDescriptorProto + + +Describes a field within a message. + +```yaml +"name": string +"number": int +"label": .google.protobuf.FieldDescriptorProto.Label +"type": .google.protobuf.FieldDescriptorProto.Type +"typeName": string +"extendee": string +"defaultValue": string +"oneofIndex": int +"jsonName": string +"options": .google.protobuf.FieldOptions + +``` + +| Field | Type | Description | Default | +| ----- | ---- | ----------- |----------- | +| `name` | `string` | | | +| `number` | `int` | | | +| `label` | [.google.protobuf.FieldDescriptorProto.Label](https://developers.google.com/protocol-buffers/docs/reference/csharp/class/google/protobuf/well-known-types/field-descriptor-proto.-label) | | | +| `type` | [.google.protobuf.FieldDescriptorProto.Type](https://developers.google.com/protocol-buffers/docs/reference/csharp/class/google/protobuf/well-known-types/field-descriptor-proto.-type) | If type_name is set, this need not be set. If both this and type_name are set, this must be one of TYPE_ENUM, TYPE_MESSAGE or TYPE_GROUP. | | +| `typeName` | `string` | For message and enum types, this is the name of the type. If the name starts with a '.', it is fully-qualified. Otherwise, C++-like scoping rules are used to find the type (i.e. first the nested types within this message are searched, then within the parent, on up to the root namespace). | | +| `extendee` | `string` | For extensions, this is the name of the type being extended. It is resolved in the same manner as type_name. | | +| `defaultValue` | `string` | For numeric types, contains the original text representation of the value. For booleans, "true" or "false". 
For strings, contains the default text contents (not escaped in any way). For bytes, contains the C escaped value. All bytes >= 128 are escaped. TODO(kenton): Base-64 encode? | | +| `oneofIndex` | `int` | If set, gives the index of a oneof in the containing type's oneof_decl list. This field is a member of that oneof. | | +| `jsonName` | `string` | JSON name of this field. The value is set by protocol compiler. If the user has set a "json_name" option on this field, that option's value will be used. Otherwise, it's deduced from the field's name by converting it to camelCase. | | +| `options` | [.google.protobuf.FieldOptions](https://developers.google.com/protocol-buffers/docs/reference/csharp/class/google/protobuf/well-known-types/field-options) | | | + + + + +--- +### Type + + + +| Name | Description | +| ----- | ----------- | +| `TYPE_DOUBLE` | 0 is reserved for errors. Order is weird for historical reasons. | +| `TYPE_FLOAT` | | +| `TYPE_INT64` | Not ZigZag encoded. Negative numbers take 10 bytes. Use TYPE_SINT64 if negative values are likely. | +| `TYPE_UINT64` | | +| `TYPE_INT32` | Not ZigZag encoded. Negative numbers take 10 bytes. Use TYPE_SINT32 if negative values are likely. | +| `TYPE_FIXED64` | | +| `TYPE_FIXED32` | | +| `TYPE_BOOL` | | +| `TYPE_STRING` | | +| `TYPE_GROUP` | Tag-delimited aggregate. Group type is deprecated and not supported in proto3. However, Proto3 implementations should still be able to parse the group wire format and treat group fields as unknown fields. | +| `TYPE_MESSAGE` | | +| `TYPE_BYTES` | New in version 2. | +| `TYPE_UINT32` | | +| `TYPE_ENUM` | | +| `TYPE_SFIXED32` | | +| `TYPE_SFIXED64` | | +| `TYPE_SINT32` | | +| `TYPE_SINT64` | | + + + + +--- +### Label + + + +| Name | Description | +| ----- | ----------- | +| `LABEL_OPTIONAL` | 0 is reserved for errors | +| `LABEL_REQUIRED` | | +| `LABEL_REPEATED` | | + + + + +--- +### OneofDescriptorProto + + +Describes a oneof. 
+ +```yaml +"name": string +"options": .google.protobuf.OneofOptions + +``` + +| Field | Type | Description | Default | +| ----- | ---- | ----------- |----------- | +| `name` | `string` | | | +| `options` | [.google.protobuf.OneofOptions](https://developers.google.com/protocol-buffers/docs/reference/csharp/class/google/protobuf/well-known-types/oneof-options) | | | + + + + +--- +### EnumDescriptorProto + + +Describes an enum type. + +```yaml +"name": string +"value": []google.protobuf.EnumValueDescriptorProto +"options": .google.protobuf.EnumOptions + +``` + +| Field | Type | Description | Default | +| ----- | ---- | ----------- |----------- | +| `name` | `string` | | | +| `value` | [[]google.protobuf.EnumValueDescriptorProto](descriptor.proto.sk.md#EnumValueDescriptorProto) | | | +| `options` | [.google.protobuf.EnumOptions](https://developers.google.com/protocol-buffers/docs/reference/csharp/class/google/protobuf/well-known-types/enum-options) | | | + + + + +--- +### EnumValueDescriptorProto + + +Describes a value within an enum. + +```yaml +"name": string +"number": int +"options": .google.protobuf.EnumValueOptions + +``` + +| Field | Type | Description | Default | +| ----- | ---- | ----------- |----------- | +| `name` | `string` | | | +| `number` | `int` | | | +| `options` | [.google.protobuf.EnumValueOptions](https://developers.google.com/protocol-buffers/docs/reference/csharp/class/google/protobuf/well-known-types/enum-value-options) | | | + + + + +--- +### ServiceDescriptorProto + + +Describes a service. 
+ +```yaml +"name": string +"method": []google.protobuf.MethodDescriptorProto +"options": .google.protobuf.ServiceOptions + +``` + +| Field | Type | Description | Default | +| ----- | ---- | ----------- |----------- | +| `name` | `string` | | | +| `method` | [[]google.protobuf.MethodDescriptorProto](descriptor.proto.sk.md#MethodDescriptorProto) | | | +| `options` | [.google.protobuf.ServiceOptions](https://developers.google.com/protocol-buffers/docs/reference/csharp/class/google/protobuf/well-known-types/service-options) | | | + + + + +--- +### MethodDescriptorProto + + +Describes a method of a service. + +```yaml +"name": string +"inputType": string +"outputType": string +"options": .google.protobuf.MethodOptions +"clientStreaming": bool +"serverStreaming": bool + +``` + +| Field | Type | Description | Default | +| ----- | ---- | ----------- |----------- | +| `name` | `string` | | | +| `inputType` | `string` | Input and output type names. These are resolved in the same way as FieldDescriptorProto.type_name, but must refer to a message type. 
| | +| `outputType` | `string` | | | +| `options` | [.google.protobuf.MethodOptions](https://developers.google.com/protocol-buffers/docs/reference/csharp/class/google/protobuf/well-known-types/method-options) | | | +| `clientStreaming` | `bool` | Identifies if client streams multiple client messages | Default: false | +| `serverStreaming` | `bool` | Identifies if server streams multiple server messages | Default: false | + + + + +--- +### FileOptions + + + +```yaml +"javaPackage": string +"javaOuterClassname": string +"javaMultipleFiles": bool +"javaGenerateEqualsAndHash": bool +"javaStringCheckUtf8": bool +"optimizeFor": .google.protobuf.FileOptions.OptimizeMode +"goPackage": string +"ccGenericServices": bool +"javaGenericServices": bool +"pyGenericServices": bool +"deprecated": bool +"ccEnableArenas": bool +"objcClassPrefix": string +"csharpNamespace": string +"swiftPrefix": string +"phpClassPrefix": string +"uninterpretedOption": []google.protobuf.UninterpretedOption + +``` + +| Field | Type | Description | Default | +| ----- | ---- | ----------- |----------- | +| `javaPackage` | `string` | Sets the Java package where classes generated from this .proto will be placed. By default, the proto package is used, but this is often inappropriate because proto packages do not normally start with backwards domain names. | | +| `javaOuterClassname` | `string` | If set, all the classes from the .proto file are wrapped in a single outer class with the given name. This applies to both Proto1 (equivalent to the old "--one_java_file" option) and Proto2 (where a .proto always translates to a single class, but you may want to explicitly choose the class name). | | +| `javaMultipleFiles` | `bool` | If set true, then the Java code generator will generate a separate .java file for each top-level message, enum, and service defined in the .proto file. Thus, these types will *not* be nested inside the outer class named by java_outer_classname. 
However, the outer class will still be generated to contain the file's getDescriptor() method as well as any top-level extensions defined in the file. | Default: false | +| `javaGenerateEqualsAndHash` | `bool` | This option does nothing. | | +| `javaStringCheckUtf8` | `bool` | If set true, then the Java2 code generator will generate code that throws an exception whenever an attempt is made to assign a non-UTF-8 byte sequence to a string field. Message reflection will do the same. However, an extension field still accepts non-UTF-8 byte sequences. This option has no effect on when used with the lite runtime. | Default: false | +| `optimizeFor` | [.google.protobuf.FileOptions.OptimizeMode](https://developers.google.com/protocol-buffers/docs/reference/csharp/class/google/protobuf/well-known-types/file-options.-optimize-mode) | | Default: SPEED | +| `goPackage` | `string` | Sets the Go package where structs generated from this .proto will be placed. If omitted, the Go package will be derived from the following: - The basename of the package import path, if provided. - Otherwise, the package statement in the .proto file, if present. - Otherwise, the basename of the .proto file, without extension. | | +| `ccGenericServices` | `bool` | Should generic services be generated in each language? "Generic" services are not specific to any particular RPC system. They are generated by the main code generators in each language (without additional plugins). Generic services were the only kind of service generation supported by early versions of google.protobuf. Generic services are now considered deprecated in favor of using plugins that generate code specific to your particular RPC system. Therefore, these default to false. Old code which depends on generic services should explicitly set them to true. | Default: false | +| `javaGenericServices` | `bool` | | Default: false | +| `pyGenericServices` | `bool` | | Default: false | +| `deprecated` | `bool` | Is this file deprecated? 
Depending on the target platform, this can emit Deprecated annotations for everything in the file, or it will be completely ignored; in the very least, this is a formalization for deprecating files. | Default: false | +| `ccEnableArenas` | `bool` | Enables the use of arenas for the proto messages in this file. This applies only to generated classes for C++. | Default: false | +| `objcClassPrefix` | `string` | Sets the objective c class prefix which is prepended to all objective c generated classes from this .proto. There is no default. | | +| `csharpNamespace` | `string` | Namespace for generated classes; defaults to the package. | | +| `swiftPrefix` | `string` | By default Swift generators will take the proto package and CamelCase it replacing '.' with underscore and use that to prefix the types/symbols defined. When this options is provided, they will use this value instead to prefix the types/symbols defined. | | +| `phpClassPrefix` | `string` | Sets the php class prefix which is prepended to all php generated classes from this .proto. Default is empty. | | +| `uninterpretedOption` | [[]google.protobuf.UninterpretedOption](descriptor.proto.sk.md#UninterpretedOption) | The parser stores options it doesn't recognize here. See above. | | + + + + +--- +### OptimizeMode + + +Generated classes can be optimized for speed or code size. + +| Name | Description | +| ----- | ----------- | +| `SPEED` | | +| `CODE_SIZE` | etc. | +| `LITE_RUNTIME` | | + + + + +--- +### MessageOptions + + + +```yaml +"messageSetWireFormat": bool +"noStandardDescriptorAccessor": bool +"deprecated": bool +"mapEntry": bool +"uninterpretedOption": []google.protobuf.UninterpretedOption + +``` + +| Field | Type | Description | Default | +| ----- | ---- | ----------- |----------- | +| `messageSetWireFormat` | `bool` | Set true to use the old proto1 MessageSet wire format for extensions. This is provided for backwards-compatibility with the MessageSet wire format. 
You should not use this for any other reason: It's less efficient, has fewer features, and is more complicated. The message must be defined exactly as follows: message Foo { option message_set_wire_format = true; extensions 4 to max; } Note that the message cannot have any defined fields; MessageSets only have extensions. All extensions of your type must be singular messages; e.g. they cannot be int32s, enums, or repeated messages. Because this is an option, the above two restrictions are not enforced by the protocol compiler. | Default: false | +| `noStandardDescriptorAccessor` | `bool` | Disables the generation of the standard "descriptor()" accessor, which can conflict with a field of the same name. This is meant to make migration from proto1 easier; new code should avoid fields named "descriptor". | Default: false | +| `deprecated` | `bool` | Is this message deprecated? Depending on the target platform, this can emit Deprecated annotations for the message, or it will be completely ignored; in the very least, this is a formalization for deprecating messages. | Default: false | +| `mapEntry` | `bool` | Whether the message is an automatically generated map entry type for the maps field. For maps fields: map&lt;KeyType, ValueType&gt; map_field = 1; The parsed descriptor looks like: message MapFieldEntry { option map_entry = true; optional KeyType key = 1; optional ValueType value = 2; } repeated MapFieldEntry map_field = 1; Implementations may choose not to generate the map_entry=true message, but use a native map in the target language to hold the keys and values. The reflection APIs in such implementations still need to work as if the field is a repeated message field. NOTE: Do not set the option in .proto files. Always use the maps syntax instead. The option should only be implicitly set by the proto compiler parser. | | +| `uninterpretedOption` | [[]google.protobuf.UninterpretedOption](descriptor.proto.sk.md#UninterpretedOption) | The parser stores options it doesn't recognize here. 
See above. | | + + + + +--- +### FieldOptions + + + +```yaml +"ctype": .google.protobuf.FieldOptions.CType +"packed": bool +"jstype": .google.protobuf.FieldOptions.JSType +"lazy": bool +"deprecated": bool +"weak": bool +"uninterpretedOption": []google.protobuf.UninterpretedOption + +``` + +| Field | Type | Description | Default | +| ----- | ---- | ----------- |----------- | +| `ctype` | [.google.protobuf.FieldOptions.CType](https://developers.google.com/protocol-buffers/docs/reference/csharp/class/google/protobuf/well-known-types/field-options.c-type) | The ctype option instructs the C++ code generator to use a different representation of the field than it normally would. See the specific options below. This option is not yet implemented in the open source release -- sorry, we'll try to include it in a future version! | Default: STRING | +| `packed` | `bool` | The packed option can be enabled for repeated primitive fields to enable a more efficient representation on the wire. Rather than repeatedly writing the tag and type for each element, the entire array is encoded as a single length-delimited blob. In proto3, only explicit setting it to false will avoid using packed encoding. | | +| `jstype` | [.google.protobuf.FieldOptions.JSType](https://developers.google.com/protocol-buffers/docs/reference/csharp/class/google/protobuf/well-known-types/field-options.js-type) | The jstype option determines the JavaScript type used for values of the field. The option is permitted only for 64 bit integral and fixed types (int64, uint64, sint64, fixed64, sfixed64). By default these types are represented as JavaScript strings. This avoids loss of precision that can happen when a large value is converted to a floating point JavaScript numbers. Specifying JS_NUMBER for the jstype causes the generated JavaScript code to use the JavaScript "number" type instead of strings. This option is an enum to permit additional types to be added, e.g. goog.math.Integer. 
| Default: JS_NORMAL | +| `lazy` | `bool` | Should this field be parsed lazily? Lazy applies only to message-type fields. It means that when the outer message is initially parsed, the inner message's contents will not be parsed but instead stored in encoded form. The inner message will actually be parsed when it is first accessed. This is only a hint. Implementations are free to choose whether to use eager or lazy parsing regardless of the value of this option. However, setting this option true suggests that the protocol author believes that using lazy parsing on this field is worth the additional bookkeeping overhead typically needed to implement it. This option does not affect the public interface of any generated code; all method signatures remain the same. Furthermore, thread-safety of the interface is not affected by this option; const methods remain safe to call from multiple threads concurrently, while non-const methods continue to require exclusive access. Note that implementations may choose not to check required fields within a lazy sub-message. That is, calling IsInitialized() on the outer message may return true even if the inner message has missing required fields. This is necessary because otherwise the inner message would have to be parsed in order to perform the check, defeating the purpose of lazy parsing. An implementation which chooses not to check required fields must be consistent about it. That is, for any particular sub-message, the implementation must either *always* check its required fields, or *never* check its required fields, regardless of whether or not the message has been parsed. | Default: false | +| `deprecated` | `bool` | Is this field deprecated? Depending on the target platform, this can emit Deprecated annotations for accessors, or it will be completely ignored; in the very least, this is a formalization for deprecating fields. | Default: false | +| `weak` | `bool` | For Google-internal migration only. Do not use. 
| Default: false | +| `uninterpretedOption` | [[]google.protobuf.UninterpretedOption](descriptor.proto.sk.md#UninterpretedOption) | The parser stores options it doesn't recognize here. See above. | | + + + + +--- +### CType + + + +| Name | Description | +| ----- | ----------- | +| `STRING` | Default mode. | +| `CORD` | | +| `STRING_PIECE` | | + + + + +--- +### JSType + + + +| Name | Description | +| ----- | ----------- | +| `JS_NORMAL` | Use the default type. | +| `JS_STRING` | Use JavaScript strings. | +| `JS_NUMBER` | Use JavaScript numbers. | + + + + +--- +### OneofOptions + + + +```yaml +"uninterpretedOption": []google.protobuf.UninterpretedOption + +``` + +| Field | Type | Description | Default | +| ----- | ---- | ----------- |----------- | +| `uninterpretedOption` | [[]google.protobuf.UninterpretedOption](descriptor.proto.sk.md#UninterpretedOption) | The parser stores options it doesn't recognize here. See above. | | + + + + +--- +### EnumOptions + + + +```yaml +"allowAlias": bool +"deprecated": bool +"uninterpretedOption": []google.protobuf.UninterpretedOption + +``` + +| Field | Type | Description | Default | +| ----- | ---- | ----------- |----------- | +| `allowAlias` | `bool` | Set this option to true to allow mapping different tag names to the same value. | | +| `deprecated` | `bool` | Is this enum deprecated? Depending on the target platform, this can emit Deprecated annotations for the enum, or it will be completely ignored; in the very least, this is a formalization for deprecating enums. | Default: false | +| `uninterpretedOption` | [[]google.protobuf.UninterpretedOption](descriptor.proto.sk.md#UninterpretedOption) | The parser stores options it doesn't recognize here. See above. 
| | + + + + +--- +### EnumValueOptions + + + +```yaml +"deprecated": bool +"uninterpretedOption": []google.protobuf.UninterpretedOption + +``` + +| Field | Type | Description | Default | +| ----- | ---- | ----------- |----------- | +| `deprecated` | `bool` | Is this enum value deprecated? Depending on the target platform, this can emit Deprecated annotations for the enum value, or it will be completely ignored; in the very least, this is a formalization for deprecating enum values. | Default: false | +| `uninterpretedOption` | [[]google.protobuf.UninterpretedOption](descriptor.proto.sk.md#UninterpretedOption) | The parser stores options it doesn't recognize here. See above. | | + + + + +--- +### ServiceOptions + + + +```yaml +"deprecated": bool +"uninterpretedOption": []google.protobuf.UninterpretedOption + +``` + +| Field | Type | Description | Default | +| ----- | ---- | ----------- |----------- | +| `deprecated` | `bool` | Is this service deprecated? Depending on the target platform, this can emit Deprecated annotations for the service, or it will be completely ignored; in the very least, this is a formalization for deprecating services. | Default: false | +| `uninterpretedOption` | [[]google.protobuf.UninterpretedOption](descriptor.proto.sk.md#UninterpretedOption) | The parser stores options it doesn't recognize here. See above. | | + + + + +--- +### MethodOptions + + + +```yaml +"deprecated": bool +"idempotencyLevel": .google.protobuf.MethodOptions.IdempotencyLevel +"uninterpretedOption": []google.protobuf.UninterpretedOption + +``` + +| Field | Type | Description | Default | +| ----- | ---- | ----------- |----------- | +| `deprecated` | `bool` | Is this method deprecated? Depending on the target platform, this can emit Deprecated annotations for the method, or it will be completely ignored; in the very least, this is a formalization for deprecating methods. 
| Default: false | +| `idempotencyLevel` | [.google.protobuf.MethodOptions.IdempotencyLevel](https://developers.google.com/protocol-buffers/docs/reference/csharp/class/google/protobuf/well-known-types/method-options.-idempotency-level) | | Default: IDEMPOTENCY_UNKNOWN | +| `uninterpretedOption` | [[]google.protobuf.UninterpretedOption](descriptor.proto.sk.md#UninterpretedOption) | The parser stores options it doesn't recognize here. See above. | | + + + + +--- +### IdempotencyLevel + + +Is this method side-effect-free (or safe in HTTP parlance), or idempotent, +or neither? HTTP based RPC implementation may choose GET verb for safe +methods, and PUT verb for idempotent methods instead of the default POST. + +| Name | Description | +| ----- | ----------- | +| `IDEMPOTENCY_UNKNOWN` | | +| `NO_SIDE_EFFECTS` | | +| `IDEMPOTENT` | | + + + + +--- +### UninterpretedOption + + +A message representing a option the parser does not recognize. This only +appears in options protos created by the compiler::Parser class. +DescriptorPool resolves these when building Descriptor objects. Therefore, +options protos in descriptor objects (e.g. returned by Descriptor::options(), +or produced by Descriptor::CopyTo()) will never have UninterpretedOptions +in them. + +```yaml +"name": []google.protobuf.UninterpretedOption.NamePart +"identifierValue": string +"positiveIntValue": int +"negativeIntValue": int +"doubleValue": float +"stringValue": bytes +"aggregateValue": string + +``` + +| Field | Type | Description | Default | +| ----- | ---- | ----------- |----------- | +| `name` | [[]google.protobuf.UninterpretedOption.NamePart](descriptor.proto.sk.md#NamePart) | | | +| `identifierValue` | `string` | The value of the uninterpreted option, in whatever type the tokenizer identified it as during parsing. Exactly one of these should be set. 
| | +| `positiveIntValue` | `int` | | | +| `negativeIntValue` | `int` | | | +| `doubleValue` | `float` | | | +| `stringValue` | `bytes` | | | +| `aggregateValue` | `string` | | | + + + + +--- +### NamePart + + +The name of the uninterpreted option. Each string represents a segment in +a dot-separated name. is_extension is true iff a segment represents an +extension (denoted with parentheses in options specs in .proto files). +E.g.,{ ["foo", false], ["bar.baz", true], ["qux", false] } represents +"foo.(bar.baz).qux". + +```yaml +"namePart": string +"isExtension": bool + +``` + +| Field | Type | Description | Default | +| ----- | ---- | ----------- |----------- | +| `namePart` | `string` | | | +| `isExtension` | `bool` | | | + + + + +--- +### SourceCodeInfo + + +Encapsulates information about the original source file from which a +FileDescriptorProto was generated. + +```yaml +"location": []google.protobuf.SourceCodeInfo.Location + +``` + +| Field | Type | Description | Default | +| ----- | ---- | ----------- |----------- | +| `location` | [[]google.protobuf.SourceCodeInfo.Location](descriptor.proto.sk.md#Location) | A Location identifies a piece of source code in a .proto file which corresponds to a particular definition. This information is intended to be useful to IDEs, code indexers, documentation generators, and similar tools. For example, say we have a file like: message Foo { optional string foo = 1; } Let's look at just the field definition: optional string foo = 1; ^ ^^ ^^ ^ ^^^ a bc de f ghi We have the following locations: span path represents [a,i) [ 4, 0, 2, 0 ] The whole field definition. [a,b) [ 4, 0, 2, 0, 4 ] The label (optional). [c,d) [ 4, 0, 2, 0, 5 ] The type (string). [e,f) [ 4, 0, 2, 0, 1 ] The name (foo). [g,h) [ 4, 0, 2, 0, 3 ] The number (1). Notes: - A location may refer to a repeated field itself (i.e. not to any particular index within it). This is used whenever a set of elements are logically enclosed in a single code segment. 
For example, an entire extend block (possibly containing multiple extension definitions) will have an outer location whose path refers to the "extensions" repeated field without an index. - Multiple locations may have the same path. This happens when a single logical declaration is spread out across multiple places. The most obvious example is the "extend" block again -- there may be multiple extend blocks in the same scope, each of which will have the same path. - A location's span is not always a subset of its parent's span. For example, the "extendee" of an extension declaration appears at the beginning of the "extend" block and is shared by all extensions within the block. - Just because a location's span is a subset of some other location's span does not mean that it is a descendent. For example, a "group" defines both a type and a field in a single declaration. Thus, the locations corresponding to the type and field and their components will overlap. - Code which tries to interpret locations should probably be designed to ignore those that it doesn't understand, as more types of locations could be recorded in the future. | | + + + + +--- +### Location + + + +```yaml +"path": []int +"span": []int +"leadingComments": string +"trailingComments": string +"leadingDetachedComments": []string + +``` + +| Field | Type | Description | Default | +| ----- | ---- | ----------- |----------- | +| `path` | `[]int` | Identifies which part of the FileDescriptorProto was defined at this location. Each element is a field number or an index. They form a path from the root FileDescriptorProto to the place where the definition. 
For example, this path: [ 4, 3, 2, 7, 1 ] refers to: file.message_type(3) // 4, 3 .field(7) // 2, 7 .name() // 1 This is because FileDescriptorProto.message_type has field number 4: repeated DescriptorProto message_type = 4; and DescriptorProto.field has field number 2: repeated FieldDescriptorProto field = 2; and FieldDescriptorProto.name has field number 1: optional string name = 1; Thus, the above path gives the location of a field name. If we removed the last element: [ 4, 3, 2, 7 ] this path refers to the whole field declaration (from the beginning of the label to the terminating semicolon). | | +| `span` | `[]int` | Always has exactly three or four elements: start line, start column, end line (optional, otherwise assumed same as start line), end column. These are packed into a single field for efficiency. Note that line and column numbers are zero-based -- typically you will want to add 1 to each before displaying to a user. | | +| `leadingComments` | `string` | If this SourceCodeInfo represents a complete declaration, these are any comments appearing before and after the declaration which appear to be attached to the declaration. A series of line comments appearing on consecutive lines, with no other tokens appearing on those lines, will be treated as a single comment. leading_detached_comments will keep paragraphs of comments that appear before (but not connected to) the current element. Each paragraph, separated by empty lines, will be one comment element in the repeated field. Only the comment content is provided; comment markers (e.g. //) are stripped out. For block comments, leading whitespace and an asterisk will be stripped from the beginning of each line other than the first. Newlines are included in the output. Examples: optional int32 foo = 1; // Comment attached to foo. // Comment attached to bar. optional int32 bar = 2; optional string baz = 3; // Comment attached to baz. // Another line attached to baz. // Comment attached to qux. 
// // Another line attached to qux. optional double qux = 4; // Detached comment for corge. This is not leading or trailing comments // to qux or corge because there are blank lines separating it from // both. // Detached comment for corge paragraph 2. optional string corge = 5; /* Block comment attached * to corge. Leading asterisks * will be removed. */ /* Block comment attached to * grault. */ optional int32 grault = 6; // ignored detached comments. | | +| `trailingComments` | `string` | | | +| `leadingDetachedComments` | `[]string` | | | + + + + +--- +### GeneratedCodeInfo + + +Describes the relationship between generated code and its original source +file. A GeneratedCodeInfo message is associated with only one generated +source file, but may contain references to different source .proto files. + +```yaml +"annotation": []google.protobuf.GeneratedCodeInfo.Annotation + +``` + +| Field | Type | Description | Default | +| ----- | ---- | ----------- |----------- | +| `annotation` | [[]google.protobuf.GeneratedCodeInfo.Annotation](descriptor.proto.sk.md#Annotation) | An Annotation connects some span of text in generated code to an element of its generating .proto file. | | + + + + +--- +### Annotation + + + +```yaml +"path": []int +"sourceFile": string +"begin": int +"end": int + +``` + +| Field | Type | Description | Default | +| ----- | ---- | ----------- |----------- | +| `path` | `[]int` | Identifies the element in the original source .proto file. This field is formatted the same as SourceCodeInfo.Location.path. | | +| `sourceFile` | `string` | Identifies the filesystem path to the original source .proto. | | +| `begin` | `int` | Identifies the starting offset in bytes in the generated code that relates to the identified object. | | +| `end` | `int` | Identifies the ending offset in bytes in the generated code that relates to the identified offset. The end offset should be one past the last relevant byte (so the length of the text = end - begin). 
| | + + + + + + + + diff --git a/test/mocks/docs/testing.solo.io.project.sk.md b/test/mocks/docs/testing.solo.io.project.sk.md new file mode 100644 index 000000000..30b98c42c --- /dev/null +++ b/test/mocks/docs/testing.solo.io.project.sk.md @@ -0,0 +1,21 @@ + + + +### API Reference for Solo-Kit Testing + +API Version: `testing.solo.io.v1` + +mock solo-kit project + +### API Resources: +- [MockResource](./github.com/solo-io/solo-kit/test/mocks/api/v1/mock_resources.proto.sk.md#MockResource) +- [FakeResource](./github.com/solo-io/solo-kit/test/mocks/api/v1/mock_resources.proto.sk.md#FakeResource) +- [AnotherMockResource](./github.com/solo-io/solo-kit/test/mocks/api/v1/more_mock_resources.proto.sk.md#AnotherMockResource) +- [ClusterResource](./github.com/solo-io/solo-kit/test/mocks/api/v1/more_mock_resources.proto.sk.md#ClusterResource) +- [MockResource](./github.com/solo-io/solo-kit/test/mocks/api/v1alpha1/mock_resources.proto.sk.md#MockResource) +- [FakeResource](./github.com/solo-io/solo-kit/test/mocks/api/v1alpha1/mock_resources.proto.sk.md#FakeResource) +- [MockResource](./github.com/solo-io/solo-kit/test/mocks/api/v2alpha1/mock_resources.proto.sk.md#MockResource) + + + + diff --git a/test/mocks/v1/testing_event_loop.sk.go b/test/mocks/group/testing_event_loop.sk.go similarity index 93% rename from test/mocks/v1/testing_event_loop.sk.go rename to test/mocks/group/testing_event_loop.sk.go index d13a38077..68de46e85 100644 --- a/test/mocks/v1/testing_event_loop.sk.go +++ b/test/mocks/group/testing_event_loop.sk.go @@ -1,6 +1,6 @@ // Code generated by solo-kit. DO NOT EDIT. 
-package v1 +package group import ( "context" @@ -46,7 +46,7 @@ func NewTestingEventLoop(emitter TestingEmitter, syncer TestingSyncer) eventloop func (el *testingEventLoop) Run(namespaces []string, opts clients.WatchOpts) (<-chan error, error) { opts = opts.WithDefaults() - opts.Ctx = contextutils.WithLogger(opts.Ctx, "v1.event_loop") + opts.Ctx = contextutils.WithLogger(opts.Ctx, "group.event_loop") logger := contextutils.LoggerFrom(opts.Ctx) logger.Infof("event loop started") @@ -56,7 +56,7 @@ func (el *testingEventLoop) Run(namespaces []string, opts clients.WatchOpts) (<- if err != nil { return nil, errors.Wrapf(err, "starting snapshot watch") } - go errutils.AggregateErrs(opts.Ctx, errs, emitterErrs, "v1.emitter errors") + go errutils.AggregateErrs(opts.Ctx, errs, emitterErrs, "group.emitter errors") go func() { // create a new context for each loop, cancel it before each loop var cancel context.CancelFunc = func() {} diff --git a/test/mocks/v1/testing_event_loop_test.go b/test/mocks/group/testing_event_loop_test.go similarity index 88% rename from test/mocks/v1/testing_event_loop_test.go rename to test/mocks/group/testing_event_loop_test.go index 64e9372c6..ea32363d6 100644 --- a/test/mocks/v1/testing_event_loop_test.go +++ b/test/mocks/group/testing_event_loop_test.go @@ -2,14 +2,14 @@ // +build solokit -package v1 +package group import ( "context" "sync" "time" - github_com_solo_io_solo_kit_pkg_api_v1_resources_common_kubernetes "github.com/solo-io/solo-kit/pkg/api/v1/resources/common/kubernetes" + github_com_solo_io_solo_kit_api_external_kubernetes_group "github.com/solo-io/solo-kit/api/external/kubernetes/group" . "github.com/onsi/ginkgo" . 
"github.com/onsi/gomega" @@ -60,7 +60,7 @@ var _ = Describe("TestingEventLoop", func() { podClientFactory := &factory.MemoryResourceClientFactory{ Cache: memory.NewInMemoryResourceCache(), } - podClient, err := github_com_solo_io_solo_kit_pkg_api_v1_resources_common_kubernetes.NewPodClient(podClientFactory) + podClient, err := github_com_solo_io_solo_kit_api_external_kubernetes_group.NewPodClient(podClientFactory) Expect(err).NotTo(HaveOccurred()) emitter = NewTestingEmitter(mockResourceClient, fakeResourceClient, anotherMockResourceClient, clusterResourceClient, mockCustomTypeClient, podClient) @@ -76,7 +76,7 @@ var _ = Describe("TestingEventLoop", func() { Expect(err).NotTo(HaveOccurred()) _, err = emitter.MockCustomType().Write(NewMockCustomType(namespace, "jerry"), clients.WriteOpts{}) Expect(err).NotTo(HaveOccurred()) - _, err = emitter.Pod().Write(github_com_solo_io_solo_kit_pkg_api_v1_resources_common_kubernetes.NewPod(namespace, "jerry"), clients.WriteOpts{}) + _, err = emitter.Pod().Write(github_com_solo_io_solo_kit_api_external_kubernetes_group.NewPod(namespace, "jerry"), clients.WriteOpts{}) Expect(err).NotTo(HaveOccurred()) sync := &mockTestingSyncer{} el := NewTestingEventLoop(emitter, sync) diff --git a/test/mocks/v1/testing_simple_event_loop.sk.go b/test/mocks/group/testing_simple_event_loop.sk.go similarity index 95% rename from test/mocks/v1/testing_simple_event_loop.sk.go rename to test/mocks/group/testing_simple_event_loop.sk.go index dde6efa3a..1109a3f6a 100644 --- a/test/mocks/v1/testing_simple_event_loop.sk.go +++ b/test/mocks/group/testing_simple_event_loop.sk.go @@ -1,6 +1,6 @@ // Code generated by solo-kit. DO NOT EDIT. 
-package v1 +package group import ( "context" @@ -43,7 +43,7 @@ func NewTestingSimpleEventLoop(emitter TestingSimpleEmitter, syncers ...TestingS } func (el *testingSimpleEventLoop) Run(ctx context.Context) (<-chan error, error) { - ctx = contextutils.WithLogger(ctx, "v1.event_loop") + ctx = contextutils.WithLogger(ctx, "group.event_loop") logger := contextutils.LoggerFrom(ctx) logger.Infof("event loop started") @@ -54,7 +54,7 @@ func (el *testingSimpleEventLoop) Run(ctx context.Context) (<-chan error, error) return nil, errors.Wrapf(err, "starting snapshot watch") } - go errutils.AggregateErrs(ctx, errs, emitterErrs, "v1.emitter errors") + go errutils.AggregateErrs(ctx, errs, emitterErrs, "group.emitter errors") go func() { // create a new context for each syncer for each loop, cancel each before each loop syncerCancels := make(map[TestingSyncer]context.CancelFunc) diff --git a/test/mocks/v1/testing_snapshot.sk.go b/test/mocks/group/testing_snapshot.sk.go similarity index 93% rename from test/mocks/v1/testing_snapshot.sk.go rename to test/mocks/group/testing_snapshot.sk.go index 198d23acd..6dd3cbb5a 100644 --- a/test/mocks/v1/testing_snapshot.sk.go +++ b/test/mocks/group/testing_snapshot.sk.go @@ -1,11 +1,11 @@ // Code generated by solo-kit. DO NOT EDIT. 
-package v1 +package group import ( "fmt" - github_com_solo_io_solo_kit_pkg_api_v1_resources_common_kubernetes "github.com/solo-io/solo-kit/pkg/api/v1/resources/common/kubernetes" + github_com_solo_io_solo_kit_api_external_kubernetes_group "github.com/solo-io/solo-kit/api/external/kubernetes/group" "github.com/solo-io/go-utils/hashutils" "go.uber.org/zap" @@ -17,7 +17,7 @@ type TestingSnapshot struct { Anothermockresources AnotherMockResourceList Clusterresources ClusterResourceList Mcts MockCustomTypeList - Pods github_com_solo_io_solo_kit_pkg_api_v1_resources_common_kubernetes.PodList + Pods github_com_solo_io_solo_kit_api_external_kubernetes_group.PodList } func (s TestingSnapshot) Clone() TestingSnapshot { diff --git a/test/mocks/v1/testing_snapshot_emitter.sk.go b/test/mocks/group/testing_snapshot_emitter.sk.go similarity index 93% rename from test/mocks/v1/testing_snapshot_emitter.sk.go rename to test/mocks/group/testing_snapshot_emitter.sk.go index df51171f3..d69f714a3 100644 --- a/test/mocks/v1/testing_snapshot_emitter.sk.go +++ b/test/mocks/group/testing_snapshot_emitter.sk.go @@ -1,12 +1,12 @@ // Code generated by solo-kit. DO NOT EDIT. 
-package v1 +package group import ( "sync" "time" - github_com_solo_io_solo_kit_pkg_api_v1_resources_common_kubernetes "github.com/solo-io/solo-kit/pkg/api/v1/resources/common/kubernetes" + github_com_solo_io_solo_kit_api_external_kubernetes_group "github.com/solo-io/solo-kit/api/external/kubernetes/group" "go.opencensus.io/stats" "go.opencensus.io/stats/view" @@ -48,15 +48,15 @@ type TestingEmitter interface { AnotherMockResource() AnotherMockResourceClient ClusterResource() ClusterResourceClient MockCustomType() MockCustomTypeClient - Pod() github_com_solo_io_solo_kit_pkg_api_v1_resources_common_kubernetes.PodClient + Pod() github_com_solo_io_solo_kit_api_external_kubernetes_group.PodClient Snapshots(watchNamespaces []string, opts clients.WatchOpts) (<-chan *TestingSnapshot, <-chan error, error) } -func NewTestingEmitter(mockResourceClient MockResourceClient, fakeResourceClient FakeResourceClient, anotherMockResourceClient AnotherMockResourceClient, clusterResourceClient ClusterResourceClient, mockCustomTypeClient MockCustomTypeClient, podClient github_com_solo_io_solo_kit_pkg_api_v1_resources_common_kubernetes.PodClient) TestingEmitter { +func NewTestingEmitter(mockResourceClient MockResourceClient, fakeResourceClient FakeResourceClient, anotherMockResourceClient AnotherMockResourceClient, clusterResourceClient ClusterResourceClient, mockCustomTypeClient MockCustomTypeClient, podClient github_com_solo_io_solo_kit_api_external_kubernetes_group.PodClient) TestingEmitter { return NewTestingEmitterWithEmit(mockResourceClient, fakeResourceClient, anotherMockResourceClient, clusterResourceClient, mockCustomTypeClient, podClient, make(chan struct{})) } -func NewTestingEmitterWithEmit(mockResourceClient MockResourceClient, fakeResourceClient FakeResourceClient, anotherMockResourceClient AnotherMockResourceClient, clusterResourceClient ClusterResourceClient, mockCustomTypeClient MockCustomTypeClient, podClient 
github_com_solo_io_solo_kit_pkg_api_v1_resources_common_kubernetes.PodClient, emit <-chan struct{}) TestingEmitter { +func NewTestingEmitterWithEmit(mockResourceClient MockResourceClient, fakeResourceClient FakeResourceClient, anotherMockResourceClient AnotherMockResourceClient, clusterResourceClient ClusterResourceClient, mockCustomTypeClient MockCustomTypeClient, podClient github_com_solo_io_solo_kit_api_external_kubernetes_group.PodClient, emit <-chan struct{}) TestingEmitter { return &testingEmitter{ mockResource: mockResourceClient, fakeResource: fakeResourceClient, @@ -75,7 +75,7 @@ type testingEmitter struct { anotherMockResource AnotherMockResourceClient clusterResource ClusterResourceClient mockCustomType MockCustomTypeClient - pod github_com_solo_io_solo_kit_pkg_api_v1_resources_common_kubernetes.PodClient + pod github_com_solo_io_solo_kit_api_external_kubernetes_group.PodClient } func (c *testingEmitter) Register() error { @@ -120,7 +120,7 @@ func (c *testingEmitter) MockCustomType() MockCustomTypeClient { return c.mockCustomType } -func (c *testingEmitter) Pod() github_com_solo_io_solo_kit_pkg_api_v1_resources_common_kubernetes.PodClient { +func (c *testingEmitter) Pod() github_com_solo_io_solo_kit_api_external_kubernetes_group.PodClient { return c.pod } @@ -167,7 +167,7 @@ func (c *testingEmitter) Snapshots(watchNamespaces []string, opts clients.WatchO mockCustomTypeChan := make(chan mockCustomTypeListWithNamespace) /* Create channel for Pod */ type podListWithNamespace struct { - list github_com_solo_io_solo_kit_pkg_api_v1_resources_common_kubernetes.PodList + list github_com_solo_io_solo_kit_api_external_kubernetes_group.PodList namespace string } podChan := make(chan podListWithNamespace) @@ -300,7 +300,7 @@ func (c *testingEmitter) Snapshots(watchNamespaces []string, opts clients.WatchO fakesByNamespace := make(map[string]FakeResourceList) anothermockresourcesByNamespace := make(map[string]AnotherMockResourceList) mctsByNamespace := 
make(map[string]MockCustomTypeList) - podsByNamespace := make(map[string]github_com_solo_io_solo_kit_pkg_api_v1_resources_common_kubernetes.PodList) + podsByNamespace := make(map[string]github_com_solo_io_solo_kit_api_external_kubernetes_group.PodList) for { record := func() { stats.Record(ctx, mTestingSnapshotIn.M(1)) } @@ -374,7 +374,7 @@ func (c *testingEmitter) Snapshots(watchNamespaces []string, opts clients.WatchO // merge lists by namespace podsByNamespace[namespace] = podNamespacedList.list - var podList github_com_solo_io_solo_kit_pkg_api_v1_resources_common_kubernetes.PodList + var podList github_com_solo_io_solo_kit_api_external_kubernetes_group.PodList for _, pods := range podsByNamespace { podList = append(podList, pods...) } diff --git a/test/mocks/v1/testing_snapshot_emitter_test.go b/test/mocks/group/testing_snapshot_emitter_test.go similarity index 91% rename from test/mocks/v1/testing_snapshot_emitter_test.go rename to test/mocks/group/testing_snapshot_emitter_test.go index e4485c0a8..ee5784a6b 100644 --- a/test/mocks/v1/testing_snapshot_emitter_test.go +++ b/test/mocks/group/testing_snapshot_emitter_test.go @@ -2,14 +2,14 @@ // +build solokit -package v1 +package group import ( "context" "os" "time" - github_com_solo_io_solo_kit_pkg_api_v1_resources_common_kubernetes "github.com/solo-io/solo-kit/pkg/api/v1/resources/common/kubernetes" + github_com_solo_io_solo_kit_api_external_kubernetes_group "github.com/solo-io/solo-kit/api/external/kubernetes/group" . "github.com/onsi/ginkgo" . "github.com/onsi/gomega" @@ -30,7 +30,7 @@ import ( _ "k8s.io/client-go/plugin/pkg/client/auth/gcp" ) -var _ = Describe("V1Emitter", func() { +var _ = Describe("GroupEmitter", func() { if os.Getenv("RUN_KUBE_TESTS") != "1" { log.Printf("This test creates kubernetes resources and is disabled by default. 
To enable, set RUN_KUBE_TESTS=1 in your env.") return @@ -47,7 +47,7 @@ var _ = Describe("V1Emitter", func() { anotherMockResourceClient AnotherMockResourceClient clusterResourceClient ClusterResourceClient mockCustomTypeClient MockCustomTypeClient - podClient github_com_solo_io_solo_kit_pkg_api_v1_resources_common_kubernetes.PodClient + podClient github_com_solo_io_solo_kit_api_external_kubernetes_group.PodClient ) BeforeEach(func() { @@ -68,8 +68,10 @@ var _ = Describe("V1Emitter", func() { mockResourceClient, err = NewMockResourceClient(mockResourceClientFactory) Expect(err).NotTo(HaveOccurred()) // FakeResource Constructor - fakeResourceClientFactory := &factory.MemoryResourceClientFactory{ - Cache: memory.NewInMemoryResourceCache(), + fakeResourceClientFactory := &factory.KubeResourceClientFactory{ + Crd: FakeResourceCrd, + Cfg: cfg, + SharedCache: kuberc.NewKubeCache(context.TODO()), } fakeResourceClient, err = NewFakeResourceClient(fakeResourceClientFactory) @@ -104,7 +106,7 @@ var _ = Describe("V1Emitter", func() { Cache: memory.NewInMemoryResourceCache(), } - podClient, err = github_com_solo_io_solo_kit_pkg_api_v1_resources_common_kubernetes.NewPodClient(podClientFactory) + podClient, err = github_com_solo_io_solo_kit_api_external_kubernetes_group.NewPodClient(podClientFactory) Expect(err).NotTo(HaveOccurred()) emitter = NewTestingEmitter(mockResourceClient, fakeResourceClient, anotherMockResourceClient, clusterResourceClient, mockCustomTypeClient, podClient) }) @@ -406,7 +408,7 @@ var _ = Describe("V1Emitter", func() { Pod */ - assertSnapshotpods := func(expectpods github_com_solo_io_solo_kit_pkg_api_v1_resources_common_kubernetes.PodList, unexpectpods github_com_solo_io_solo_kit_pkg_api_v1_resources_common_kubernetes.PodList) { + assertSnapshotpods := func(expectpods github_com_solo_io_solo_kit_api_external_kubernetes_group.PodList, unexpectpods github_com_solo_io_solo_kit_api_external_kubernetes_group.PodList) { drain: for { select { @@ -432,32 +434,32 
@@ var _ = Describe("V1Emitter", func() { } } } - pod1a, err := podClient.Write(github_com_solo_io_solo_kit_pkg_api_v1_resources_common_kubernetes.NewPod(namespace1, name1), clients.WriteOpts{Ctx: ctx}) + pod1a, err := podClient.Write(github_com_solo_io_solo_kit_api_external_kubernetes_group.NewPod(namespace1, name1), clients.WriteOpts{Ctx: ctx}) Expect(err).NotTo(HaveOccurred()) - pod1b, err := podClient.Write(github_com_solo_io_solo_kit_pkg_api_v1_resources_common_kubernetes.NewPod(namespace2, name1), clients.WriteOpts{Ctx: ctx}) + pod1b, err := podClient.Write(github_com_solo_io_solo_kit_api_external_kubernetes_group.NewPod(namespace2, name1), clients.WriteOpts{Ctx: ctx}) Expect(err).NotTo(HaveOccurred()) - assertSnapshotpods(github_com_solo_io_solo_kit_pkg_api_v1_resources_common_kubernetes.PodList{pod1a, pod1b}, nil) - pod2a, err := podClient.Write(github_com_solo_io_solo_kit_pkg_api_v1_resources_common_kubernetes.NewPod(namespace1, name2), clients.WriteOpts{Ctx: ctx}) + assertSnapshotpods(github_com_solo_io_solo_kit_api_external_kubernetes_group.PodList{pod1a, pod1b}, nil) + pod2a, err := podClient.Write(github_com_solo_io_solo_kit_api_external_kubernetes_group.NewPod(namespace1, name2), clients.WriteOpts{Ctx: ctx}) Expect(err).NotTo(HaveOccurred()) - pod2b, err := podClient.Write(github_com_solo_io_solo_kit_pkg_api_v1_resources_common_kubernetes.NewPod(namespace2, name2), clients.WriteOpts{Ctx: ctx}) + pod2b, err := podClient.Write(github_com_solo_io_solo_kit_api_external_kubernetes_group.NewPod(namespace2, name2), clients.WriteOpts{Ctx: ctx}) Expect(err).NotTo(HaveOccurred()) - assertSnapshotpods(github_com_solo_io_solo_kit_pkg_api_v1_resources_common_kubernetes.PodList{pod1a, pod1b, pod2a, pod2b}, nil) + assertSnapshotpods(github_com_solo_io_solo_kit_api_external_kubernetes_group.PodList{pod1a, pod1b, pod2a, pod2b}, nil) err = podClient.Delete(pod2a.GetMetadata().Namespace, pod2a.GetMetadata().Name, clients.DeleteOpts{Ctx: ctx}) 
Expect(err).NotTo(HaveOccurred()) err = podClient.Delete(pod2b.GetMetadata().Namespace, pod2b.GetMetadata().Name, clients.DeleteOpts{Ctx: ctx}) Expect(err).NotTo(HaveOccurred()) - assertSnapshotpods(github_com_solo_io_solo_kit_pkg_api_v1_resources_common_kubernetes.PodList{pod1a, pod1b}, github_com_solo_io_solo_kit_pkg_api_v1_resources_common_kubernetes.PodList{pod2a, pod2b}) + assertSnapshotpods(github_com_solo_io_solo_kit_api_external_kubernetes_group.PodList{pod1a, pod1b}, github_com_solo_io_solo_kit_api_external_kubernetes_group.PodList{pod2a, pod2b}) err = podClient.Delete(pod1a.GetMetadata().Namespace, pod1a.GetMetadata().Name, clients.DeleteOpts{Ctx: ctx}) Expect(err).NotTo(HaveOccurred()) err = podClient.Delete(pod1b.GetMetadata().Namespace, pod1b.GetMetadata().Name, clients.DeleteOpts{Ctx: ctx}) Expect(err).NotTo(HaveOccurred()) - assertSnapshotpods(nil, github_com_solo_io_solo_kit_pkg_api_v1_resources_common_kubernetes.PodList{pod1a, pod1b, pod2a, pod2b}) + assertSnapshotpods(nil, github_com_solo_io_solo_kit_api_external_kubernetes_group.PodList{pod1a, pod1b, pod2a, pod2b}) }) It("tracks snapshots on changes to any resource using AllNamespace", func() { ctx := context.Background() @@ -751,7 +753,7 @@ var _ = Describe("V1Emitter", func() { Pod */ - assertSnapshotpods := func(expectpods github_com_solo_io_solo_kit_pkg_api_v1_resources_common_kubernetes.PodList, unexpectpods github_com_solo_io_solo_kit_pkg_api_v1_resources_common_kubernetes.PodList) { + assertSnapshotpods := func(expectpods github_com_solo_io_solo_kit_api_external_kubernetes_group.PodList, unexpectpods github_com_solo_io_solo_kit_api_external_kubernetes_group.PodList) { drain: for { select { @@ -777,31 +779,31 @@ var _ = Describe("V1Emitter", func() { } } } - pod1a, err := podClient.Write(github_com_solo_io_solo_kit_pkg_api_v1_resources_common_kubernetes.NewPod(namespace1, name1), clients.WriteOpts{Ctx: ctx}) + pod1a, err := 
podClient.Write(github_com_solo_io_solo_kit_api_external_kubernetes_group.NewPod(namespace1, name1), clients.WriteOpts{Ctx: ctx}) Expect(err).NotTo(HaveOccurred()) - pod1b, err := podClient.Write(github_com_solo_io_solo_kit_pkg_api_v1_resources_common_kubernetes.NewPod(namespace2, name1), clients.WriteOpts{Ctx: ctx}) + pod1b, err := podClient.Write(github_com_solo_io_solo_kit_api_external_kubernetes_group.NewPod(namespace2, name1), clients.WriteOpts{Ctx: ctx}) Expect(err).NotTo(HaveOccurred()) - assertSnapshotpods(github_com_solo_io_solo_kit_pkg_api_v1_resources_common_kubernetes.PodList{pod1a, pod1b}, nil) - pod2a, err := podClient.Write(github_com_solo_io_solo_kit_pkg_api_v1_resources_common_kubernetes.NewPod(namespace1, name2), clients.WriteOpts{Ctx: ctx}) + assertSnapshotpods(github_com_solo_io_solo_kit_api_external_kubernetes_group.PodList{pod1a, pod1b}, nil) + pod2a, err := podClient.Write(github_com_solo_io_solo_kit_api_external_kubernetes_group.NewPod(namespace1, name2), clients.WriteOpts{Ctx: ctx}) Expect(err).NotTo(HaveOccurred()) - pod2b, err := podClient.Write(github_com_solo_io_solo_kit_pkg_api_v1_resources_common_kubernetes.NewPod(namespace2, name2), clients.WriteOpts{Ctx: ctx}) + pod2b, err := podClient.Write(github_com_solo_io_solo_kit_api_external_kubernetes_group.NewPod(namespace2, name2), clients.WriteOpts{Ctx: ctx}) Expect(err).NotTo(HaveOccurred()) - assertSnapshotpods(github_com_solo_io_solo_kit_pkg_api_v1_resources_common_kubernetes.PodList{pod1a, pod1b, pod2a, pod2b}, nil) + assertSnapshotpods(github_com_solo_io_solo_kit_api_external_kubernetes_group.PodList{pod1a, pod1b, pod2a, pod2b}, nil) err = podClient.Delete(pod2a.GetMetadata().Namespace, pod2a.GetMetadata().Name, clients.DeleteOpts{Ctx: ctx}) Expect(err).NotTo(HaveOccurred()) err = podClient.Delete(pod2b.GetMetadata().Namespace, pod2b.GetMetadata().Name, clients.DeleteOpts{Ctx: ctx}) Expect(err).NotTo(HaveOccurred()) - 
assertSnapshotpods(github_com_solo_io_solo_kit_pkg_api_v1_resources_common_kubernetes.PodList{pod1a, pod1b}, github_com_solo_io_solo_kit_pkg_api_v1_resources_common_kubernetes.PodList{pod2a, pod2b}) + assertSnapshotpods(github_com_solo_io_solo_kit_api_external_kubernetes_group.PodList{pod1a, pod1b}, github_com_solo_io_solo_kit_api_external_kubernetes_group.PodList{pod2a, pod2b}) err = podClient.Delete(pod1a.GetMetadata().Namespace, pod1a.GetMetadata().Name, clients.DeleteOpts{Ctx: ctx}) Expect(err).NotTo(HaveOccurred()) err = podClient.Delete(pod1b.GetMetadata().Namespace, pod1b.GetMetadata().Name, clients.DeleteOpts{Ctx: ctx}) Expect(err).NotTo(HaveOccurred()) - assertSnapshotpods(nil, github_com_solo_io_solo_kit_pkg_api_v1_resources_common_kubernetes.PodList{pod1a, pod1b, pod2a, pod2b}) + assertSnapshotpods(nil, github_com_solo_io_solo_kit_api_external_kubernetes_group.PodList{pod1a, pod1b, pod2a, pod2b}) }) }) diff --git a/test/mocks/v1/testing_snapshot_simple_emitter.sk.go b/test/mocks/group/testing_snapshot_simple_emitter.sk.go similarity index 92% rename from test/mocks/v1/testing_snapshot_simple_emitter.sk.go rename to test/mocks/group/testing_snapshot_simple_emitter.sk.go index 859b91511..e5b751635 100644 --- a/test/mocks/v1/testing_snapshot_simple_emitter.sk.go +++ b/test/mocks/group/testing_snapshot_simple_emitter.sk.go @@ -1,13 +1,13 @@ // Code generated by solo-kit. DO NOT EDIT. 
-package v1 +package group import ( "context" - fmt "fmt" + "fmt" "time" - github_com_solo_io_solo_kit_pkg_api_v1_resources_common_kubernetes "github.com/solo-io/solo-kit/pkg/api/v1/resources/common/kubernetes" + github_com_solo_io_solo_kit_api_external_kubernetes_group "github.com/solo-io/solo-kit/api/external/kubernetes/group" "go.opencensus.io/stats" @@ -93,7 +93,7 @@ func (c *testingSimpleEmitter) Snapshots(ctx context.Context) (<-chan *TestingSn currentSnapshot.Clusterresources = append(currentSnapshot.Clusterresources, typed) case *MockCustomType: currentSnapshot.Mcts = append(currentSnapshot.Mcts, typed) - case *github_com_solo_io_solo_kit_pkg_api_v1_resources_common_kubernetes.Pod: + case *github_com_solo_io_solo_kit_api_external_kubernetes_group.Pod: currentSnapshot.Pods = append(currentSnapshot.Pods, typed) default: select { diff --git a/test/mocks/v1/fake_resource.sk.go b/test/mocks/v1/fake_resource.sk.go index f03e77660..6302b4679 100644 --- a/test/mocks/v1/fake_resource.sk.go +++ b/test/mocks/v1/fake_resource.sk.go @@ -28,6 +28,10 @@ func (r *FakeResource) SetMetadata(meta core.Metadata) { r.Metadata = meta } +func (r *FakeResource) SetStatus(status core.Status) { + r.Status = status +} + func (r *FakeResource) Hash() uint64 { metaCopy := r.GetMetadata() metaCopy.ResourceVersion = "" @@ -59,6 +63,14 @@ func (list FakeResourceList) AsResources() resources.ResourceList { return ress } +func (list FakeResourceList) AsInputResources() resources.InputResourceList { + var ress resources.InputResourceList + for _, fakeResource := range list { + ress = append(ress, fakeResource) + } + return ress +} + func (list FakeResourceList) Names() []string { var names []string for _, fakeResource := range list { diff --git a/test/mocks/v1/fake_resource_client_test.go b/test/mocks/v1/fake_resource_client_test.go index 9bce0f00d..6d4af21ae 100644 --- a/test/mocks/v1/fake_resource_client_test.go +++ b/test/mocks/v1/fake_resource_client_test.go @@ -73,6 +73,7 @@ func 
FakeResourceClientTest(namespace string, client FakeResourceClient, name1, Expect(r1.GetMetadata().ResourceVersion).NotTo(Equal(input.GetMetadata().ResourceVersion)) Expect(r1.GetMetadata().Ref()).To(Equal(input.GetMetadata().Ref())) Expect(r1.Count).To(Equal(input.Count)) + Expect(r1.Status).To(Equal(input.Status)) _, err = client.Write(input, clients.WriteOpts{ OverwriteExisting: true, diff --git a/test/mocks/v1/mock_resources.pb.go b/test/mocks/v1/mock_resources.pb.go index 4c2f00c4a..bef8472a9 100644 --- a/test/mocks/v1/mock_resources.pb.go +++ b/test/mocks/v1/mock_resources.pb.go @@ -11,8 +11,6 @@ import ( bytes "bytes" context "context" fmt "fmt" - math "math" - v2 "github.com/envoyproxy/go-control-plane/envoy/api/v2" _ "github.com/gogo/protobuf/gogoproto" proto "github.com/gogo/protobuf/proto" @@ -21,6 +19,7 @@ import ( grpc "google.golang.org/grpc" codes "google.golang.org/grpc/codes" status "google.golang.org/grpc/status" + math "math" ) // Reference imports to suppress errors if they are not otherwise used. 
@@ -210,6 +209,7 @@ func _MockResource_OneofSizer(msg proto.Message) (n int) { type FakeResource struct { Count uint32 `protobuf:"varint,1,opt,name=count,proto3" json:"count,omitempty"` Metadata core.Metadata `protobuf:"bytes,7,opt,name=metadata,proto3" json:"metadata"` + Status core.Status `protobuf:"bytes,6,opt,name=status,proto3" json:"status"` XXX_NoUnkeyedLiteral struct{} `json:"-"` XXX_unrecognized []byte `json:"-"` XXX_sizecache int32 `json:"-"` @@ -253,6 +253,13 @@ func (m *FakeResource) GetMetadata() core.Metadata { return core.Metadata{} } +func (m *FakeResource) GetStatus() core.Status { + if m != nil { + return m.Status + } + return core.Status{} +} + // //@solo-kit:xds-service=MockXdsResourceDiscoveryService //@solo-kit:resource.no_references @@ -306,44 +313,44 @@ func init() { } var fileDescriptor_5de7a91ad5dc71ff = []byte{ - // 577 bytes of a gzipped FileDescriptorProto - 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xb4, 0x94, 0x31, 0x6f, 0xd3, 0x40, - 0x14, 0xc7, 0xeb, 0xd4, 0x4d, 0xdb, 0xa3, 0x55, 0x85, 0x1b, 0xaa, 0xc8, 0x94, 0x26, 0x4a, 0x11, - 0x0a, 0x08, 0x6c, 0x12, 0x16, 0x94, 0x31, 0x44, 0x11, 0x4b, 0x85, 0xe4, 0x30, 0x20, 0x96, 0xe8, - 0x62, 0xbf, 0xb8, 0x87, 0xe3, 0x7b, 0xc1, 0x77, 0x76, 0xe9, 0x86, 0x32, 0xb0, 0x31, 0xf1, 0x25, - 0xf8, 0x0c, 0x7c, 0x02, 0x76, 0xf6, 0x0e, 0x88, 0x89, 0x2d, 0xdf, 0x00, 0xf9, 0x6c, 0xa7, 0x4a, - 0x29, 0x55, 0x41, 0x62, 0xf2, 0xbd, 0xfb, 0xbf, 0xdf, 0x7b, 0xef, 0x9e, 0xdf, 0x1d, 0xe9, 0xfa, - 0x4c, 0x1e, 0xc7, 0x23, 0xcb, 0xc5, 0xd0, 0x16, 0x38, 0xc1, 0x47, 0x0c, 0xb3, 0x6f, 0xc0, 0xa4, - 0x2d, 0x41, 0x48, 0x3b, 0x44, 0x37, 0x10, 0x36, 0x9d, 0x32, 0x3b, 0x69, 0x29, 0x63, 0x18, 0x81, - 0xc0, 0x38, 0x72, 0x41, 0x58, 0xd3, 0x08, 0x25, 0x1a, 0x3b, 0xa9, 0x1f, 0xe3, 0xbe, 0x95, 0x82, - 0x16, 0x43, 0x73, 0x1f, 0x78, 0x82, 0xa7, 0x19, 0xd3, 0xb6, 0x3d, 0x26, 0x5c, 0x4c, 0x20, 0x3a, - 0xcd, 0xdc, 0xcd, 0x7d, 0x1f, 0xd1, 0x9f, 0x80, 0x92, 0x29, 0xe7, 0x28, 0xa9, 0x64, 0xc8, 0xf3, - 0x60, 0x66, 0xc5, 
0x47, 0x1f, 0xd5, 0xd2, 0x4e, 0x57, 0xf9, 0x6e, 0xeb, 0xaa, 0x32, 0x8b, 0xda, - 0x40, 0x52, 0x8f, 0x4a, 0x9a, 0x23, 0xf6, 0x35, 0x10, 0x21, 0xa9, 0x8c, 0xc5, 0x5f, 0xe4, 0x28, - 0xec, 0x0c, 0x69, 0x7c, 0x29, 0x91, 0xad, 0x23, 0x74, 0x03, 0x27, 0xef, 0x88, 0xd1, 0x26, 0xe5, - 0x2c, 0x66, 0xb5, 0x5c, 0xd7, 0x9a, 0x37, 0xda, 0x15, 0xcb, 0xc5, 0x08, 0x8a, 0xc6, 0x58, 0x03, - 0xa5, 0x75, 0xf5, 0xaf, 0x67, 0xb5, 0x15, 0x27, 0xf7, 0x34, 0x9e, 0x92, 0x8d, 0xa2, 0xf4, 0xea, - 0xba, 0xa2, 0xf6, 0x96, 0xa9, 0xa3, 0x5c, 0xcd, 0xb9, 0x85, 0xb7, 0x71, 0x8f, 0xe8, 0x8a, 0xd2, - 0xea, 0x5a, 0x73, 0xb3, 0xbb, 0xfd, 0xf3, 0xac, 0xb6, 0xa9, 0x1a, 0xf0, 0x46, 0x20, 0x77, 0xce, - 0x97, 0xc6, 0x43, 0xb2, 0x23, 0x30, 0x84, 0xa1, 0x17, 0x87, 0xa3, 0xe1, 0x98, 0xc1, 0xc4, 0xab, - 0x7a, 0x0a, 0xd1, 0xdf, 0xcf, 0x75, 0xcd, 0xd9, 0x4e, 0xc5, 0x5e, 0x1c, 0x8e, 0xfa, 0xa9, 0x64, - 0xdc, 0x21, 0x9b, 0xc8, 0x01, 0xc7, 0x43, 0xe4, 0x50, 0x5d, 0x4d, 0xfd, 0x9e, 0xaf, 0x38, 0x1b, - 0x6a, 0xeb, 0x05, 0x87, 0x73, 0x59, 0x9e, 0x60, 0xb5, 0x54, 0xd7, 0x9a, 0x1b, 0x0b, 0xf9, 0xe5, - 0x09, 0x76, 0x76, 0x67, 0x73, 0x5d, 0x27, 0xa5, 0x30, 0x98, 0xcd, 0xf5, 0x75, 0x63, 0x4d, 0x4d, - 0x4f, 0x77, 0x97, 0xdc, 0x4c, 0x67, 0x64, 0x98, 0x81, 0xaa, 0x02, 0xd1, 0x10, 0x64, 0xab, 0x4f, - 0x03, 0x58, 0xf4, 0xae, 0x42, 0xd6, 0x5c, 0x8c, 0xb9, 0x54, 0xc7, 0xd9, 0x76, 0x32, 0xe3, 0xdf, - 0xbb, 0x53, 0x54, 0x32, 0xce, 0x2b, 0x19, 0xd3, 0x00, 0x44, 0xc3, 0x26, 0xb7, 0xd2, 0x1f, 0xf6, - 0xca, 0x13, 0x45, 0xde, 0x67, 0xc8, 0xc7, 0xcc, 0x37, 0xf6, 0x48, 0xd9, 0xc3, 0x90, 0x32, 0x9e, - 0x75, 0xd3, 0xc9, 0xad, 0xf6, 0x87, 0x55, 0x52, 0xbb, 0x40, 0xf4, 0x8a, 0x81, 0x1e, 0x40, 0x94, - 0x30, 0x17, 0x0c, 0x8f, 0xdc, 0x1e, 0xc8, 0x08, 0x68, 0x78, 0x79, 0xe8, 0x03, 0x4b, 0xdd, 0x07, - 0x8b, 0x4e, 0x99, 0x95, 0xb4, 0xad, 0x05, 0xee, 0xc0, 0xdb, 0x18, 0x84, 0x34, 0x6b, 0x7f, 0xd4, - 0xc5, 0x14, 0xb9, 0x80, 0xc6, 0x4a, 0x53, 0x7b, 0xac, 0x19, 0x21, 0x31, 0x7b, 0x30, 0x91, 0xf4, - 0xf2, 0x24, 0x87, 0x17, 0x82, 0xa4, 0x9e, 0xbf, 0x65, 
0xba, 0x7b, 0xb5, 0xd3, 0x52, 0xba, 0x8f, - 0x1a, 0x31, 0xfb, 0x20, 0xdd, 0xe3, 0xff, 0x74, 0x28, 0x6b, 0xf6, 0xed, 0xc7, 0xa7, 0x52, 0xb3, - 0x71, 0xb8, 0xf4, 0x46, 0x74, 0xd2, 0x81, 0x79, 0xe7, 0x89, 0xe2, 0x8d, 0x71, 0x55, 0xb6, 0x8e, - 0xf6, 0xa0, 0x6b, 0x7f, 0xfe, 0x7e, 0xa0, 0xbd, 0xbe, 0x7f, 0xcd, 0xf7, 0x2a, 0x69, 0x8d, 0xca, - 0xea, 0x8e, 0x3e, 0xf9, 0x15, 0x00, 0x00, 0xff, 0xff, 0xc3, 0x3d, 0x67, 0xce, 0xe3, 0x04, 0x00, - 0x00, + // 582 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xb4, 0x54, 0x31, 0x6f, 0xd3, 0x40, + 0x14, 0xae, 0x53, 0x37, 0x6d, 0x8f, 0x56, 0x15, 0x6e, 0xa9, 0x22, 0x53, 0x9a, 0x28, 0x45, 0x28, + 0x20, 0xb0, 0x49, 0x58, 0x50, 0xc6, 0x10, 0x45, 0x2c, 0x15, 0x92, 0xc3, 0x80, 0x58, 0xa2, 0x8b, + 0xfd, 0xe2, 0x1e, 0x8e, 0xef, 0x05, 0xdf, 0xd9, 0xa5, 0x1b, 0xca, 0xc0, 0xc6, 0xc4, 0x8f, 0x80, + 0xdf, 0xc0, 0x2f, 0x60, 0x67, 0xef, 0x80, 0x98, 0xd8, 0xf2, 0x0f, 0x90, 0xcf, 0x76, 0xaa, 0x94, + 0x52, 0x15, 0x10, 0x93, 0xef, 0xf9, 0xfb, 0xbe, 0xf7, 0xbe, 0x7b, 0x77, 0xf7, 0x48, 0xc7, 0x67, + 0xf2, 0x28, 0x1e, 0x5a, 0x2e, 0x86, 0xb6, 0xc0, 0x31, 0x3e, 0x60, 0x98, 0x7d, 0x03, 0x26, 0x6d, + 0x09, 0x42, 0xda, 0x21, 0xba, 0x81, 0xb0, 0xe9, 0x84, 0xd9, 0x49, 0x53, 0x05, 0x83, 0x08, 0x04, + 0xc6, 0x91, 0x0b, 0xc2, 0x9a, 0x44, 0x28, 0xd1, 0xd8, 0x4a, 0x79, 0x8c, 0xfb, 0x56, 0x2a, 0xb4, + 0x18, 0x9a, 0x7b, 0xc0, 0x13, 0x3c, 0xc9, 0x34, 0x2d, 0xdb, 0x63, 0xc2, 0xc5, 0x04, 0xa2, 0x93, + 0x8c, 0x6e, 0xee, 0xf9, 0x88, 0xfe, 0x18, 0x14, 0x4c, 0x39, 0x47, 0x49, 0x25, 0x43, 0x9e, 0x27, + 0x33, 0x77, 0x7c, 0xf4, 0x51, 0x2d, 0xed, 0x74, 0x95, 0xff, 0x6d, 0x5e, 0x66, 0xb3, 0xf0, 0x06, + 0x92, 0x7a, 0x54, 0xd2, 0x5c, 0x62, 0x5f, 0x41, 0x22, 0x24, 0x95, 0xb1, 0xf8, 0x83, 0x1a, 0x45, + 0x9c, 0x49, 0xea, 0x9f, 0x4b, 0x64, 0xe3, 0x10, 0xdd, 0xc0, 0xc9, 0x3b, 0x62, 0xb4, 0x48, 0x39, + 0xcb, 0x59, 0x29, 0xd7, 0xb4, 0xc6, 0xb5, 0xd6, 0x8e, 0xe5, 0x62, 0x04, 0x45, 0x63, 0xac, 0xbe, + 0xc2, 0x3a, 0xfa, 0x97, 0xd3, 0xea, 
0x92, 0x93, 0x33, 0x8d, 0xc7, 0x64, 0xad, 0xb0, 0x5e, 0x59, + 0x55, 0xaa, 0xdd, 0x45, 0xd5, 0x61, 0x8e, 0xe6, 0xba, 0x39, 0xdb, 0xb8, 0x43, 0x74, 0xa5, 0xd2, + 0x6a, 0x5a, 0x63, 0xbd, 0xb3, 0xf9, 0xe3, 0xb4, 0xba, 0xae, 0x1a, 0xf0, 0x4a, 0x20, 0x77, 0xce, + 0x96, 0xc6, 0x7d, 0xb2, 0x25, 0x30, 0x84, 0x81, 0x17, 0x87, 0xc3, 0xc1, 0x88, 0xc1, 0xd8, 0xab, + 0x78, 0x4a, 0xa2, 0xbf, 0x9d, 0xe9, 0x9a, 0xb3, 0x99, 0x82, 0xdd, 0x38, 0x1c, 0xf6, 0x52, 0xc8, + 0xb8, 0x45, 0xd6, 0x91, 0x03, 0x8e, 0x06, 0xc8, 0xa1, 0xb2, 0x9c, 0xf2, 0x9e, 0x2e, 0x39, 0x6b, + 0xea, 0xd7, 0x33, 0x0e, 0x67, 0xb0, 0x3c, 0xc6, 0x4a, 0xa9, 0xa6, 0x35, 0xd6, 0xe6, 0xf0, 0xf3, + 0x63, 0x6c, 0x6f, 0x4f, 0x67, 0xba, 0x4e, 0x4a, 0x61, 0x30, 0x9d, 0xe9, 0xab, 0xc6, 0x8a, 0xba, + 0x3d, 0x9d, 0x6d, 0x72, 0x3d, 0xbd, 0x23, 0x83, 0x4c, 0xa8, 0x1c, 0x88, 0xfa, 0x47, 0x8d, 0x6c, + 0xf4, 0x68, 0x00, 0xf3, 0xe6, 0xed, 0x90, 0x15, 0x17, 0x63, 0x2e, 0xd5, 0x7e, 0x36, 0x9d, 0x2c, + 0xf8, 0x87, 0xf6, 0xfc, 0xc5, 0x61, 0x14, 0xf6, 0x47, 0xb9, 0xfd, 0x11, 0x0d, 0x40, 0xd4, 0x6d, + 0x72, 0x23, 0x3d, 0xe5, 0x17, 0x9e, 0x28, 0xbc, 0x3e, 0x41, 0x3e, 0x62, 0xbe, 0xb1, 0x4b, 0xca, + 0x1e, 0x86, 0x94, 0xf1, 0xec, 0x08, 0x9c, 0x3c, 0x6a, 0xbd, 0x5b, 0x26, 0xd5, 0x73, 0x8a, 0x6e, + 0xf1, 0x0a, 0xfa, 0x10, 0x25, 0xcc, 0x05, 0xc3, 0x23, 0x37, 0xfb, 0x32, 0x02, 0x1a, 0x5e, 0x9c, + 0x7a, 0xdf, 0x52, 0x8f, 0xc8, 0xa2, 0x13, 0x66, 0x25, 0x2d, 0x6b, 0x2e, 0x77, 0xe0, 0x75, 0x0c, + 0x42, 0x9a, 0xd5, 0xdf, 0xe2, 0x62, 0x82, 0x5c, 0x40, 0x7d, 0xa9, 0xa1, 0x3d, 0xd4, 0x8c, 0x90, + 0x98, 0x5d, 0x18, 0x4b, 0x7a, 0x71, 0x91, 0x83, 0x73, 0x49, 0x52, 0xe6, 0x2f, 0x95, 0x6e, 0x5f, + 0x4e, 0x5a, 0x28, 0xf7, 0x5e, 0x23, 0x66, 0x0f, 0xa4, 0x7b, 0xf4, 0x9f, 0x36, 0x65, 0x4d, 0xbf, + 0x7e, 0xff, 0x50, 0x6a, 0xd4, 0x0f, 0x16, 0x06, 0x4b, 0x3b, 0xbd, 0x65, 0x6f, 0x3c, 0x51, 0x0c, + 0x26, 0x57, 0x55, 0x6b, 0x6b, 0xf7, 0x3a, 0xf6, 0xa7, 0x6f, 0xfb, 0xda, 0xcb, 0xbb, 0x57, 0x1c, + 0x72, 0x49, 0x73, 0x58, 0x56, 0x0f, 0xfb, 0xd1, 0xcf, 0x00, 0x00, 0x00, 
0xff, 0xff, 0x9a, 0xbf, + 0x07, 0x09, 0x18, 0x05, 0x00, 0x00, } func (this *MockResource) Equal(that interface{}) bool { @@ -464,6 +471,9 @@ func (this *FakeResource) Equal(that interface{}) bool { if !this.Metadata.Equal(&that1.Metadata) { return false } + if !this.Status.Equal(&that1.Status) { + return false + } if !bytes.Equal(this.XXX_unrecognized, that1.XXX_unrecognized) { return false } diff --git a/test/mocks/v1/more_mock_resources.pb.go b/test/mocks/v1/more_mock_resources.pb.go index f598a1cec..70101aa4f 100644 --- a/test/mocks/v1/more_mock_resources.pb.go +++ b/test/mocks/v1/more_mock_resources.pb.go @@ -6,11 +6,10 @@ package v1 import ( bytes "bytes" fmt "fmt" - math "math" - _ "github.com/gogo/protobuf/gogoproto" proto "github.com/gogo/protobuf/proto" core "github.com/solo-io/solo-kit/pkg/api/v1/resources/core" + math "math" ) // Reference imports to suppress errors if they are not otherwise used. diff --git a/test/mocks/v1/testing.solo.io_suite_test.go b/test/mocks/v1/testing.solo.io_suite_test.go index 300f58bc0..c999c0c35 100644 --- a/test/mocks/v1/testing.solo.io_suite_test.go +++ b/test/mocks/v1/testing.solo.io_suite_test.go @@ -36,13 +36,13 @@ var ( Expect(err).NotTo(HaveOccurred()) clientset, err := apiexts.NewForConfig(cfg) Expect(err).NotTo(HaveOccurred()) - err = clientset.ApiextensionsV1beta1().CustomResourceDefinitions().Delete("anothermockresources.testing.solo.io", &metav1.DeleteOptions{}) - testutils.ErrorNotOccuredOrNotFound(err) - err = clientset.ApiextensionsV1beta1().CustomResourceDefinitions().Delete("clusterresources.testing.solo.io", &metav1.DeleteOptions{}) + err = clientset.ApiextensionsV1beta1().CustomResourceDefinitions().Delete("mocks.testing.solo.io", &metav1.DeleteOptions{}) testutils.ErrorNotOccuredOrNotFound(err) err = clientset.ApiextensionsV1beta1().CustomResourceDefinitions().Delete("fakes.testing.solo.io", &metav1.DeleteOptions{}) testutils.ErrorNotOccuredOrNotFound(err) - err = 
clientset.ApiextensionsV1beta1().CustomResourceDefinitions().Delete("mocks.testing.solo.io", &metav1.DeleteOptions{}) + err = clientset.ApiextensionsV1beta1().CustomResourceDefinitions().Delete("anothermockresources.testing.solo.io", &metav1.DeleteOptions{}) + testutils.ErrorNotOccuredOrNotFound(err) + err = clientset.ApiextensionsV1beta1().CustomResourceDefinitions().Delete("clusterresources.testing.solo.io", &metav1.DeleteOptions{}) testutils.ErrorNotOccuredOrNotFound(err) Expect(lock.ReleaseLock()).NotTo(HaveOccurred()) }) diff --git a/test/mocks/v1alpha1/fake_resource.sk.go b/test/mocks/v1alpha1/fake_resource.sk.go index 1f52ec215..23ae952f8 100644 --- a/test/mocks/v1alpha1/fake_resource.sk.go +++ b/test/mocks/v1alpha1/fake_resource.sk.go @@ -126,7 +126,7 @@ func (o *FakeResource) DeepCopyObject() runtime.Object { var ( FakeResourceGVK = schema.GroupVersionKind{ Version: "v1alpha1", - Group: "crds.testing.solo.io", + Group: "testing.solo.io", Kind: "FakeResource", } FakeResourceCrd = crd.NewCrd( diff --git a/test/mocks/v1alpha1/mock_resource.sk.go b/test/mocks/v1alpha1/mock_resource.sk.go index 03d04177d..fc9ac070b 100644 --- a/test/mocks/v1alpha1/mock_resource.sk.go +++ b/test/mocks/v1alpha1/mock_resource.sk.go @@ -139,7 +139,7 @@ func (o *MockResource) DeepCopyObject() runtime.Object { var ( MockResourceGVK = schema.GroupVersionKind{ Version: "v1alpha1", - Group: "crds.testing.solo.io", + Group: "testing.solo.io", Kind: "MockResource", } MockResourceCrd = crd.NewCrd( diff --git a/test/mocks/v1alpha1/mock_resources.pb.go b/test/mocks/v1alpha1/mock_resources.pb.go index 621c296aa..404fe856e 100644 --- a/test/mocks/v1alpha1/mock_resources.pb.go +++ b/test/mocks/v1alpha1/mock_resources.pb.go @@ -10,13 +10,12 @@ package v1alpha1 import ( bytes "bytes" fmt "fmt" - math "math" - _ "github.com/envoyproxy/go-control-plane/envoy/api/v2" _ "github.com/gogo/protobuf/gogoproto" proto "github.com/gogo/protobuf/proto" core 
"github.com/solo-io/solo-kit/pkg/api/v1/resources/core" _ "google.golang.org/genproto/googleapis/api/annotations" + math "math" ) // Reference imports to suppress errors if they are not otherwise used. diff --git a/test/mocks/v1alpha1/more_mock_resources.pb.go b/test/mocks/v1alpha1/more_mock_resources.pb.go deleted file mode 100644 index 42465e30a..000000000 --- a/test/mocks/v1alpha1/more_mock_resources.pb.go +++ /dev/null @@ -1,238 +0,0 @@ -// Code generated by protoc-gen-gogo. DO NOT EDIT. -// source: github.com/solo-io/solo-kit/test/mocks/api/v1alpha1/more_mock_resources.proto - -package v1alpha1 - -import ( - bytes "bytes" - fmt "fmt" - math "math" - - _ "github.com/gogo/protobuf/gogoproto" - proto "github.com/gogo/protobuf/proto" - core "github.com/solo-io/solo-kit/pkg/api/v1/resources/core" -) - -// Reference imports to suppress errors if they are not otherwise used. -var _ = proto.Marshal -var _ = fmt.Errorf -var _ = math.Inf - -// This is a compile-time assertion to ensure that this generated file -// is compatible with the proto package it is being compiled against. -// A compilation error at this line likely means your copy of the -// proto package needs to be updated. 
-const _ = proto.GoGoProtoPackageIsVersion2 // please upgrade the proto package - -// -//Description of the AnotherMockResource -type AnotherMockResource struct { - Metadata core.Metadata `protobuf:"bytes,1,opt,name=metadata,proto3" json:"metadata"` - Status core.Status `protobuf:"bytes,6,opt,name=status,proto3" json:"status"` - // comments that go above the basic field in our docs - BasicField string `protobuf:"bytes,2,opt,name=basic_field,json=basicField,proto3" json:"basic_field,omitempty"` - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` -} - -func (m *AnotherMockResource) Reset() { *m = AnotherMockResource{} } -func (m *AnotherMockResource) String() string { return proto.CompactTextString(m) } -func (*AnotherMockResource) ProtoMessage() {} -func (*AnotherMockResource) Descriptor() ([]byte, []int) { - return fileDescriptor_adb5d88e228084e4, []int{0} -} -func (m *AnotherMockResource) XXX_Unmarshal(b []byte) error { - return xxx_messageInfo_AnotherMockResource.Unmarshal(m, b) -} -func (m *AnotherMockResource) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - return xxx_messageInfo_AnotherMockResource.Marshal(b, m, deterministic) -} -func (m *AnotherMockResource) XXX_Merge(src proto.Message) { - xxx_messageInfo_AnotherMockResource.Merge(m, src) -} -func (m *AnotherMockResource) XXX_Size() int { - return xxx_messageInfo_AnotherMockResource.Size(m) -} -func (m *AnotherMockResource) XXX_DiscardUnknown() { - xxx_messageInfo_AnotherMockResource.DiscardUnknown(m) -} - -var xxx_messageInfo_AnotherMockResource proto.InternalMessageInfo - -func (m *AnotherMockResource) GetMetadata() core.Metadata { - if m != nil { - return m.Metadata - } - return core.Metadata{} -} - -func (m *AnotherMockResource) GetStatus() core.Status { - if m != nil { - return m.Status - } - return core.Status{} -} - -func (m *AnotherMockResource) GetBasicField() string { - if m != nil { - return m.BasicField - } - return "" -} 
- -type ClusterResource struct { - Metadata core.Metadata `protobuf:"bytes,1,opt,name=metadata,proto3" json:"metadata"` - Status core.Status `protobuf:"bytes,6,opt,name=status,proto3" json:"status"` - // comments that go above the basic field in our docs - BasicField string `protobuf:"bytes,2,opt,name=basic_field,json=basicField,proto3" json:"basic_field,omitempty"` - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` -} - -func (m *ClusterResource) Reset() { *m = ClusterResource{} } -func (m *ClusterResource) String() string { return proto.CompactTextString(m) } -func (*ClusterResource) ProtoMessage() {} -func (*ClusterResource) Descriptor() ([]byte, []int) { - return fileDescriptor_adb5d88e228084e4, []int{1} -} -func (m *ClusterResource) XXX_Unmarshal(b []byte) error { - return xxx_messageInfo_ClusterResource.Unmarshal(m, b) -} -func (m *ClusterResource) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - return xxx_messageInfo_ClusterResource.Marshal(b, m, deterministic) -} -func (m *ClusterResource) XXX_Merge(src proto.Message) { - xxx_messageInfo_ClusterResource.Merge(m, src) -} -func (m *ClusterResource) XXX_Size() int { - return xxx_messageInfo_ClusterResource.Size(m) -} -func (m *ClusterResource) XXX_DiscardUnknown() { - xxx_messageInfo_ClusterResource.DiscardUnknown(m) -} - -var xxx_messageInfo_ClusterResource proto.InternalMessageInfo - -func (m *ClusterResource) GetMetadata() core.Metadata { - if m != nil { - return m.Metadata - } - return core.Metadata{} -} - -func (m *ClusterResource) GetStatus() core.Status { - if m != nil { - return m.Status - } - return core.Status{} -} - -func (m *ClusterResource) GetBasicField() string { - if m != nil { - return m.BasicField - } - return "" -} - -func init() { - proto.RegisterType((*AnotherMockResource)(nil), "testing.solo.io.AnotherMockResource") - proto.RegisterType((*ClusterResource)(nil), "testing.solo.io.ClusterResource") -} - -func init() 
{ - proto.RegisterFile("github.com/solo-io/solo-kit/test/mocks/api/v1alpha1/more_mock_resources.proto", fileDescriptor_adb5d88e228084e4) -} - -var fileDescriptor_adb5d88e228084e4 = []byte{ - // 335 bytes of a gzipped FileDescriptorProto - 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xcc, 0x92, 0xc1, 0x4e, 0x32, 0x31, - 0x10, 0xc7, 0xbf, 0xf2, 0xad, 0x44, 0xcb, 0x01, 0x53, 0x09, 0xd9, 0x70, 0x10, 0x82, 0x31, 0xe1, - 0x62, 0x1b, 0x30, 0x26, 0xc6, 0x9b, 0x98, 0x78, 0xe3, 0x82, 0x37, 0x2f, 0xa4, 0x94, 0xba, 0x34, - 0xec, 0x32, 0xa4, 0xed, 0xfa, 0x00, 0xfb, 0x34, 0xbe, 0x89, 0xfa, 0x12, 0x1e, 0x7c, 0x83, 0x7d, - 0x03, 0xd3, 0xed, 0xee, 0x26, 0x5e, 0x0c, 0xde, 0x3c, 0xb5, 0x9d, 0x99, 0x5f, 0xe7, 0xff, 0xcf, - 0x0c, 0x9e, 0x45, 0xca, 0xae, 0xd3, 0x25, 0x15, 0x90, 0x30, 0x03, 0x31, 0x5c, 0x28, 0xf0, 0xe7, - 0x46, 0x59, 0x66, 0xa5, 0xb1, 0x2c, 0x01, 0xb1, 0x31, 0x8c, 0xef, 0x14, 0x7b, 0x1e, 0xf3, 0x78, - 0xb7, 0xe6, 0x63, 0x96, 0x80, 0x96, 0x0b, 0x17, 0x5f, 0x68, 0x69, 0x20, 0xd5, 0x42, 0x1a, 0xba, - 0xd3, 0x60, 0x81, 0xb4, 0x1d, 0xa2, 0xb6, 0x11, 0x75, 0x7f, 0x50, 0x05, 0xbd, 0x4e, 0x04, 0x11, - 0x14, 0x39, 0xe6, 0x6e, 0xbe, 0xac, 0x37, 0xfe, 0xa9, 0xab, 0x6f, 0xc5, 0x12, 0x69, 0xf9, 0x8a, - 0x5b, 0x5e, 0x22, 0x6c, 0x0f, 0xc4, 0x58, 0x6e, 0x53, 0xf3, 0x8b, 0x1e, 0xd5, 0xdb, 0x23, 0xc3, - 0x77, 0x84, 0x4f, 0x6e, 0xb7, 0x60, 0xd7, 0x52, 0xcf, 0x40, 0x6c, 0xe6, 0xa5, 0x39, 0x72, 0x8d, - 0x0f, 0x2b, 0x35, 0x21, 0x1a, 0xa0, 0x51, 0x6b, 0xd2, 0xa5, 0x02, 0xb4, 0xac, 0x5c, 0xd2, 0x59, - 0x99, 0x9d, 0x06, 0x6f, 0x1f, 0xfd, 0x7f, 0xf3, 0xba, 0x9a, 0x4c, 0x70, 0xd3, 0x8b, 0x0a, 0x9b, - 0x05, 0xd7, 0xf9, 0xce, 0x3d, 0x14, 0xb9, 0x92, 0x2a, 0x2b, 0x49, 0x1f, 0xb7, 0x96, 0xdc, 0x28, - 0xb1, 0x78, 0x52, 0x32, 0x5e, 0x85, 0x8d, 0x01, 0x1a, 0x1d, 0xcd, 0x71, 0x11, 0xba, 0x77, 0x91, - 0x9b, 0xb3, 0x2c, 0x0f, 0x0e, 0xf0, 0x7f, 0x9e, 0xe8, 0x2c, 0x0f, 0xba, 0xa4, 0xc3, 0xbd, 0x64, - 0x37, 0x90, 0x7a, 0x1e, 0xc3, 0x57, 0x84, 0xdb, 0x77, 0x71, 0x6a, 0xac, 0xd4, 0x7f, 0xd5, 
0xc7, - 0xb9, 0xf7, 0x21, 0x62, 0xe7, 0x83, 0x90, 0x63, 0xe1, 0xe5, 0xd6, 0x1e, 0xb2, 0x3c, 0x68, 0x84, - 0x68, 0x7a, 0xf5, 0xf2, 0x79, 0x8a, 0x1e, 0xd9, 0x9e, 0x8b, 0x5a, 0x2d, 0xe9, 0xb2, 0x59, 0xcc, - 0xf4, 0xf2, 0x2b, 0x00, 0x00, 0xff, 0xff, 0x51, 0x9f, 0xd0, 0xc7, 0xe2, 0x02, 0x00, 0x00, -} - -func (this *AnotherMockResource) Equal(that interface{}) bool { - if that == nil { - return this == nil - } - - that1, ok := that.(*AnotherMockResource) - if !ok { - that2, ok := that.(AnotherMockResource) - if ok { - that1 = &that2 - } else { - return false - } - } - if that1 == nil { - return this == nil - } else if this == nil { - return false - } - if !this.Metadata.Equal(&that1.Metadata) { - return false - } - if !this.Status.Equal(&that1.Status) { - return false - } - if this.BasicField != that1.BasicField { - return false - } - if !bytes.Equal(this.XXX_unrecognized, that1.XXX_unrecognized) { - return false - } - return true -} -func (this *ClusterResource) Equal(that interface{}) bool { - if that == nil { - return this == nil - } - - that1, ok := that.(*ClusterResource) - if !ok { - that2, ok := that.(ClusterResource) - if ok { - that1 = &that2 - } else { - return false - } - } - if that1 == nil { - return this == nil - } else if this == nil { - return false - } - if !this.Metadata.Equal(&that1.Metadata) { - return false - } - if !this.Status.Equal(&that1.Status) { - return false - } - if this.BasicField != that1.BasicField { - return false - } - if !bytes.Equal(this.XXX_unrecognized, that1.XXX_unrecognized) { - return false - } - return true -} diff --git a/test/mocks/v1alpha1/testing.solo.io_suite_test.go b/test/mocks/v1alpha1/testing.solo.io_suite_test.go index 57ce95c5a..4272f0dde 100644 --- a/test/mocks/v1alpha1/testing.solo.io_suite_test.go +++ b/test/mocks/v1alpha1/testing.solo.io_suite_test.go @@ -36,13 +36,13 @@ var ( Expect(err).NotTo(HaveOccurred()) clientset, err := apiexts.NewForConfig(cfg) Expect(err).NotTo(HaveOccurred()) - err = 
clientset.ApiextensionsV1beta1().CustomResourceDefinitions().Delete("anothermockresources.testing.solo.io", &metav1.DeleteOptions{}) - testutils.ErrorNotOccuredOrNotFound(err) - err = clientset.ApiextensionsV1beta1().CustomResourceDefinitions().Delete("clusterresources.testing.solo.io", &metav1.DeleteOptions{}) + err = clientset.ApiextensionsV1beta1().CustomResourceDefinitions().Delete("mocks.testing.solo.io", &metav1.DeleteOptions{}) testutils.ErrorNotOccuredOrNotFound(err) err = clientset.ApiextensionsV1beta1().CustomResourceDefinitions().Delete("fakes.testing.solo.io", &metav1.DeleteOptions{}) testutils.ErrorNotOccuredOrNotFound(err) - err = clientset.ApiextensionsV1beta1().CustomResourceDefinitions().Delete("mocks.testing.solo.io", &metav1.DeleteOptions{}) + err = clientset.ApiextensionsV1beta1().CustomResourceDefinitions().Delete("anothermockresources.testing.solo.io", &metav1.DeleteOptions{}) + testutils.ErrorNotOccuredOrNotFound(err) + err = clientset.ApiextensionsV1beta1().CustomResourceDefinitions().Delete("clusterresources.testing.solo.io", &metav1.DeleteOptions{}) testutils.ErrorNotOccuredOrNotFound(err) Expect(lock.ReleaseLock()).NotTo(HaveOccurred()) }) diff --git a/test/mocks/v1alpha1/testing_simple_event_loop.sk.go b/test/mocks/v1alpha1/testing_simple_event_loop.sk.go deleted file mode 100644 index 05e3abea7..000000000 --- a/test/mocks/v1alpha1/testing_simple_event_loop.sk.go +++ /dev/null @@ -1,122 +0,0 @@ -// Code generated by solo-kit. DO NOT EDIT. 
- -package v1alpha1 - -import ( - "context" - "fmt" - - "go.opencensus.io/trace" - - "github.com/solo-io/go-utils/contextutils" - "github.com/solo-io/go-utils/errutils" - "github.com/solo-io/solo-kit/pkg/api/v1/eventloop" - "github.com/solo-io/solo-kit/pkg/errors" -) - -// SyncDeciders Syncer which implements this interface -// can make smarter decisions over whether -// it should be restarted (including having its context cancelled) -// based on a diff of the previous and current snapshot - -// Deprecated: use TestingSyncDeciderWithContext -type TestingSyncDecider interface { - TestingSyncer - ShouldSync(old, new *TestingSnapshot) bool -} - -type TestingSyncDeciderWithContext interface { - TestingSyncer - ShouldSync(ctx context.Context, old, new *TestingSnapshot) bool -} - -type testingSimpleEventLoop struct { - emitter TestingSimpleEmitter - syncers []TestingSyncer -} - -func NewTestingSimpleEventLoop(emitter TestingSimpleEmitter, syncers ...TestingSyncer) eventloop.SimpleEventLoop { - return &testingSimpleEventLoop{ - emitter: emitter, - syncers: syncers, - } -} - -func (el *testingSimpleEventLoop) Run(ctx context.Context) (<-chan error, error) { - ctx = contextutils.WithLogger(ctx, "v1alpha1.event_loop") - logger := contextutils.LoggerFrom(ctx) - logger.Infof("event loop started") - - errs := make(chan error) - - watch, emitterErrs, err := el.emitter.Snapshots(ctx) - if err != nil { - return nil, errors.Wrapf(err, "starting snapshot watch") - } - - go errutils.AggregateErrs(ctx, errs, emitterErrs, "v1alpha1.emitter errors") - go func() { - // create a new context for each syncer for each loop, cancel each before each loop - syncerCancels := make(map[TestingSyncer]context.CancelFunc) - - // use closure to allow cancel function to be updated as context changes - defer func() { - for _, cancel := range syncerCancels { - cancel() - } - }() - - // cache the previous snapshot for comparison - var previousSnapshot *TestingSnapshot - - for { - select { - case snapshot, 
ok := <-watch: - if !ok { - return - } - - // cancel any open watches from previous loop - for _, syncer := range el.syncers { - // allow the syncer to decide if we should sync it + cancel its previous context - if syncDecider, isDecider := syncer.(TestingSyncDecider); isDecider { - if shouldSync := syncDecider.ShouldSync(previousSnapshot, snapshot); !shouldSync { - continue // skip syncing this syncer - } - } else if syncDeciderWithContext, isDecider := syncer.(TestingSyncDeciderWithContext); isDecider { - if shouldSync := syncDeciderWithContext.ShouldSync(ctx, previousSnapshot, snapshot); !shouldSync { - continue // skip syncing this syncer - } - } - - // if this syncer had a previous context, cancel it - cancel, ok := syncerCancels[syncer] - if ok { - cancel() - } - - ctx, span := trace.StartSpan(ctx, fmt.Sprintf("testing.solo.io.SimpleEventLoopSync-%T", syncer)) - ctx, canc := context.WithCancel(ctx) - err := syncer.Sync(ctx, snapshot) - span.End() - - if err != nil { - select { - case errs <- err: - default: - logger.Errorf("write error channel is full! could not propagate err: %v", err) - } - } - - syncerCancels[syncer] = canc - } - - previousSnapshot = snapshot - - case <-ctx.Done(): - return - } - } - }() - return errs, nil -} diff --git a/test/mocks/v1alpha1/testing_snapshot.sk.go b/test/mocks/v1alpha1/testing_snapshot.sk.go deleted file mode 100644 index 42f788263..000000000 --- a/test/mocks/v1alpha1/testing_snapshot.sk.go +++ /dev/null @@ -1,60 +0,0 @@ -// Code generated by solo-kit. DO NOT EDIT. 
- -package v1alpha1 - -import ( - "fmt" - - "github.com/solo-io/go-utils/hashutils" - "go.uber.org/zap" -) - -type TestingSnapshot struct { - Mocks MockResourceList -} - -func (s TestingSnapshot) Clone() TestingSnapshot { - return TestingSnapshot{ - Mocks: s.Mocks.Clone(), - } -} - -func (s TestingSnapshot) Hash() uint64 { - return hashutils.HashAll( - s.hashMocks(), - ) -} - -func (s TestingSnapshot) hashMocks() uint64 { - return hashutils.HashAll(s.Mocks.AsInterfaces()...) -} - -func (s TestingSnapshot) HashFields() []zap.Field { - var fields []zap.Field - fields = append(fields, zap.Uint64("mocks", s.hashMocks())) - - return append(fields, zap.Uint64("snapshotHash", s.Hash())) -} - -type TestingSnapshotStringer struct { - Version uint64 - Mocks []string -} - -func (ss TestingSnapshotStringer) String() string { - s := fmt.Sprintf("TestingSnapshot %v\n", ss.Version) - - s += fmt.Sprintf(" Mocks %v\n", len(ss.Mocks)) - for _, name := range ss.Mocks { - s += fmt.Sprintf(" %v\n", name) - } - - return s -} - -func (s TestingSnapshot) Stringer() TestingSnapshotStringer { - return TestingSnapshotStringer{ - Version: s.Hash(), - Mocks: s.Mocks.NamespacesDotNames(), - } -} diff --git a/test/mocks/v1alpha1/testing_snapshot_emitter.sk.go b/test/mocks/v1alpha1/testing_snapshot_emitter.sk.go deleted file mode 100644 index 11f151c04..000000000 --- a/test/mocks/v1alpha1/testing_snapshot_emitter.sk.go +++ /dev/null @@ -1,175 +0,0 @@ -// Code generated by solo-kit. DO NOT EDIT. 
- -package v1alpha1 - -import ( - "sync" - "time" - - "go.opencensus.io/stats" - "go.opencensus.io/stats/view" - "go.opencensus.io/tag" - - "github.com/solo-io/go-utils/errutils" - "github.com/solo-io/solo-kit/pkg/api/v1/clients" - "github.com/solo-io/solo-kit/pkg/errors" -) - -var ( - mTestingSnapshotIn = stats.Int64("testing.solo.io/snap_emitter/snap_in", "The number of snapshots in", "1") - mTestingSnapshotOut = stats.Int64("testing.solo.io/snap_emitter/snap_out", "The number of snapshots out", "1") - - testingsnapshotInView = &view.View{ - Name: "testing.solo.io_snap_emitter/snap_in", - Measure: mTestingSnapshotIn, - Description: "The number of snapshots updates coming in", - Aggregation: view.Count(), - TagKeys: []tag.Key{}, - } - testingsnapshotOutView = &view.View{ - Name: "testing.solo.io/snap_emitter/snap_out", - Measure: mTestingSnapshotOut, - Description: "The number of snapshots updates going out", - Aggregation: view.Count(), - TagKeys: []tag.Key{}, - } -) - -func init() { - view.Register(testingsnapshotInView, testingsnapshotOutView) -} - -type TestingEmitter interface { - Register() error - MockResource() MockResourceClient - Snapshots(watchNamespaces []string, opts clients.WatchOpts) (<-chan *TestingSnapshot, <-chan error, error) -} - -func NewTestingEmitter(mockResourceClient MockResourceClient) TestingEmitter { - return NewTestingEmitterWithEmit(mockResourceClient, make(chan struct{})) -} - -func NewTestingEmitterWithEmit(mockResourceClient MockResourceClient, emit <-chan struct{}) TestingEmitter { - return &testingEmitter{ - mockResource: mockResourceClient, - forceEmit: emit, - } -} - -type testingEmitter struct { - forceEmit <-chan struct{} - mockResource MockResourceClient -} - -func (c *testingEmitter) Register() error { - if err := c.mockResource.Register(); err != nil { - return err - } - return nil -} - -func (c *testingEmitter) MockResource() MockResourceClient { - return c.mockResource -} - -func (c *testingEmitter) 
Snapshots(watchNamespaces []string, opts clients.WatchOpts) (<-chan *TestingSnapshot, <-chan error, error) { - - if len(watchNamespaces) == 0 { - watchNamespaces = []string{""} - } - - for _, ns := range watchNamespaces { - if ns == "" && len(watchNamespaces) > 1 { - return nil, nil, errors.Errorf("the \"\" namespace is used to watch all namespaces. Snapshots can either be tracked for " + - "specific namespaces or \"\" AllNamespaces, but not both.") - } - } - - errs := make(chan error) - var done sync.WaitGroup - ctx := opts.Ctx - /* Create channel for MockResource */ - type mockResourceListWithNamespace struct { - list MockResourceList - namespace string - } - mockResourceChan := make(chan mockResourceListWithNamespace) - - for _, namespace := range watchNamespaces { - /* Setup namespaced watch for MockResource */ - mockResourceNamespacesChan, mockResourceErrs, err := c.mockResource.Watch(namespace, opts) - if err != nil { - return nil, nil, errors.Wrapf(err, "starting MockResource watch") - } - - done.Add(1) - go func(namespace string) { - defer done.Done() - errutils.AggregateErrs(ctx, errs, mockResourceErrs, namespace+"-mocks") - }(namespace) - - /* Watch for changes and update snapshot */ - go func(namespace string) { - for { - select { - case <-ctx.Done(): - return - case mockResourceList := <-mockResourceNamespacesChan: - select { - case <-ctx.Done(): - return - case mockResourceChan <- mockResourceListWithNamespace{list: mockResourceList, namespace: namespace}: - } - } - } - }(namespace) - } - - snapshots := make(chan *TestingSnapshot) - go func() { - originalSnapshot := TestingSnapshot{} - currentSnapshot := originalSnapshot.Clone() - timer := time.NewTicker(time.Second * 1) - sync := func() { - if originalSnapshot.Hash() == currentSnapshot.Hash() { - return - } - - stats.Record(ctx, mTestingSnapshotOut.M(1)) - originalSnapshot = currentSnapshot.Clone() - sentSnapshot := currentSnapshot.Clone() - snapshots <- &sentSnapshot - } - mocksByNamespace := 
make(map[string]MockResourceList) - - for { - record := func() { stats.Record(ctx, mTestingSnapshotIn.M(1)) } - - select { - case <-timer.C: - sync() - case <-ctx.Done(): - close(snapshots) - done.Wait() - close(errs) - return - case <-c.forceEmit: - sentSnapshot := currentSnapshot.Clone() - snapshots <- &sentSnapshot - case mockResourceNamespacedList := <-mockResourceChan: - record() - - namespace := mockResourceNamespacedList.namespace - - // merge lists by namespace - mocksByNamespace[namespace] = mockResourceNamespacedList.list - var mockResourceList MockResourceList - for _, mocks := range mocksByNamespace { - mockResourceList = append(mockResourceList, mocks...) - } - currentSnapshot.Mocks = mockResourceList.Sort() - } - } - }() - return snapshots, errs, nil -} diff --git a/test/mocks/v1alpha1/testing_snapshot_emitter_test.go b/test/mocks/v1alpha1/testing_snapshot_emitter_test.go deleted file mode 100644 index 74a570606..000000000 --- a/test/mocks/v1alpha1/testing_snapshot_emitter_test.go +++ /dev/null @@ -1,208 +0,0 @@ -// Code generated by solo-kit. DO NOT EDIT. - -// +build solokit - -package v1alpha1 - -import ( - "context" - "os" - "time" - - . "github.com/onsi/ginkgo" - . 
"github.com/onsi/gomega" - "github.com/solo-io/go-utils/kubeutils" - "github.com/solo-io/go-utils/log" - "github.com/solo-io/solo-kit/pkg/api/v1/clients" - "github.com/solo-io/solo-kit/pkg/api/v1/clients/factory" - kuberc "github.com/solo-io/solo-kit/pkg/api/v1/clients/kube" - "github.com/solo-io/solo-kit/test/helpers" - "k8s.io/client-go/kubernetes" - "k8s.io/client-go/rest" - - // Needed to run tests in GKE - _ "k8s.io/client-go/plugin/pkg/client/auth/gcp" - - // From https://github.com/kubernetes/client-go/blob/53c7adfd0294caa142d961e1f780f74081d5b15f/examples/out-of-cluster-client-configuration/main.go#L31 - _ "k8s.io/client-go/plugin/pkg/client/auth/gcp" -) - -var _ = Describe("V1Alpha1Emitter", func() { - if os.Getenv("RUN_KUBE_TESTS") != "1" { - log.Printf("This test creates kubernetes resources and is disabled by default. To enable, set RUN_KUBE_TESTS=1 in your env.") - return - } - var ( - namespace1 string - namespace2 string - name1, name2 = "angela" + helpers.RandString(3), "bob" + helpers.RandString(3) - cfg *rest.Config - kube kubernetes.Interface - emitter TestingEmitter - mockResourceClient MockResourceClient - ) - - BeforeEach(func() { - namespace1 = helpers.RandString(8) - namespace2 = helpers.RandString(8) - kube = helpers.MustKubeClient() - err := kubeutils.CreateNamespacesInParallel(kube, namespace1, namespace2) - Expect(err).NotTo(HaveOccurred()) - cfg, err = kubeutils.GetConfig("", "") - Expect(err).NotTo(HaveOccurred()) - // MockResource Constructor - mockResourceClientFactory := &factory.KubeResourceClientFactory{ - Crd: MockResourceCrd, - Cfg: cfg, - SharedCache: kuberc.NewKubeCache(context.TODO()), - } - - mockResourceClient, err = NewMockResourceClient(mockResourceClientFactory) - Expect(err).NotTo(HaveOccurred()) - emitter = NewTestingEmitter(mockResourceClient) - }) - AfterEach(func() { - err := kubeutils.DeleteNamespacesInParallelBlocking(kube, namespace1, namespace2) - Expect(err).NotTo(HaveOccurred()) - }) - It("tracks snapshots on 
changes to any resource", func() { - ctx := context.Background() - err := emitter.Register() - Expect(err).NotTo(HaveOccurred()) - - snapshots, errs, err := emitter.Snapshots([]string{namespace1, namespace2}, clients.WatchOpts{ - Ctx: ctx, - RefreshRate: time.Second, - }) - Expect(err).NotTo(HaveOccurred()) - - var snap *TestingSnapshot - - /* - MockResource - */ - - assertSnapshotMocks := func(expectMocks MockResourceList, unexpectMocks MockResourceList) { - drain: - for { - select { - case snap = <-snapshots: - for _, expected := range expectMocks { - if _, err := snap.Mocks.Find(expected.GetMetadata().Ref().Strings()); err != nil { - continue drain - } - } - for _, unexpected := range unexpectMocks { - if _, err := snap.Mocks.Find(unexpected.GetMetadata().Ref().Strings()); err == nil { - continue drain - } - } - break drain - case err := <-errs: - Expect(err).NotTo(HaveOccurred()) - case <-time.After(time.Second * 10): - nsList1, _ := mockResourceClient.List(namespace1, clients.ListOpts{}) - nsList2, _ := mockResourceClient.List(namespace2, clients.ListOpts{}) - combined := append(nsList1, nsList2...) - Fail("expected final snapshot before 10 seconds. 
expected " + log.Sprintf("%v", combined)) - } - } - } - mockResource1a, err := mockResourceClient.Write(NewMockResource(namespace1, name1), clients.WriteOpts{Ctx: ctx}) - Expect(err).NotTo(HaveOccurred()) - mockResource1b, err := mockResourceClient.Write(NewMockResource(namespace2, name1), clients.WriteOpts{Ctx: ctx}) - Expect(err).NotTo(HaveOccurred()) - - assertSnapshotMocks(MockResourceList{mockResource1a, mockResource1b}, nil) - mockResource2a, err := mockResourceClient.Write(NewMockResource(namespace1, name2), clients.WriteOpts{Ctx: ctx}) - Expect(err).NotTo(HaveOccurred()) - mockResource2b, err := mockResourceClient.Write(NewMockResource(namespace2, name2), clients.WriteOpts{Ctx: ctx}) - Expect(err).NotTo(HaveOccurred()) - - assertSnapshotMocks(MockResourceList{mockResource1a, mockResource1b, mockResource2a, mockResource2b}, nil) - - err = mockResourceClient.Delete(mockResource2a.GetMetadata().Namespace, mockResource2a.GetMetadata().Name, clients.DeleteOpts{Ctx: ctx}) - Expect(err).NotTo(HaveOccurred()) - err = mockResourceClient.Delete(mockResource2b.GetMetadata().Namespace, mockResource2b.GetMetadata().Name, clients.DeleteOpts{Ctx: ctx}) - Expect(err).NotTo(HaveOccurred()) - - assertSnapshotMocks(MockResourceList{mockResource1a, mockResource1b}, MockResourceList{mockResource2a, mockResource2b}) - - err = mockResourceClient.Delete(mockResource1a.GetMetadata().Namespace, mockResource1a.GetMetadata().Name, clients.DeleteOpts{Ctx: ctx}) - Expect(err).NotTo(HaveOccurred()) - err = mockResourceClient.Delete(mockResource1b.GetMetadata().Namespace, mockResource1b.GetMetadata().Name, clients.DeleteOpts{Ctx: ctx}) - Expect(err).NotTo(HaveOccurred()) - - assertSnapshotMocks(nil, MockResourceList{mockResource1a, mockResource1b, mockResource2a, mockResource2b}) - }) - It("tracks snapshots on changes to any resource using AllNamespace", func() { - ctx := context.Background() - err := emitter.Register() - Expect(err).NotTo(HaveOccurred()) - - snapshots, errs, err := 
emitter.Snapshots([]string{""}, clients.WatchOpts{ - Ctx: ctx, - RefreshRate: time.Second, - }) - Expect(err).NotTo(HaveOccurred()) - - var snap *TestingSnapshot - - /* - MockResource - */ - - assertSnapshotMocks := func(expectMocks MockResourceList, unexpectMocks MockResourceList) { - drain: - for { - select { - case snap = <-snapshots: - for _, expected := range expectMocks { - if _, err := snap.Mocks.Find(expected.GetMetadata().Ref().Strings()); err != nil { - continue drain - } - } - for _, unexpected := range unexpectMocks { - if _, err := snap.Mocks.Find(unexpected.GetMetadata().Ref().Strings()); err == nil { - continue drain - } - } - break drain - case err := <-errs: - Expect(err).NotTo(HaveOccurred()) - case <-time.After(time.Second * 10): - nsList1, _ := mockResourceClient.List(namespace1, clients.ListOpts{}) - nsList2, _ := mockResourceClient.List(namespace2, clients.ListOpts{}) - combined := append(nsList1, nsList2...) - Fail("expected final snapshot before 10 seconds. expected " + log.Sprintf("%v", combined)) - } - } - } - mockResource1a, err := mockResourceClient.Write(NewMockResource(namespace1, name1), clients.WriteOpts{Ctx: ctx}) - Expect(err).NotTo(HaveOccurred()) - mockResource1b, err := mockResourceClient.Write(NewMockResource(namespace2, name1), clients.WriteOpts{Ctx: ctx}) - Expect(err).NotTo(HaveOccurred()) - - assertSnapshotMocks(MockResourceList{mockResource1a, mockResource1b}, nil) - mockResource2a, err := mockResourceClient.Write(NewMockResource(namespace1, name2), clients.WriteOpts{Ctx: ctx}) - Expect(err).NotTo(HaveOccurred()) - mockResource2b, err := mockResourceClient.Write(NewMockResource(namespace2, name2), clients.WriteOpts{Ctx: ctx}) - Expect(err).NotTo(HaveOccurred()) - - assertSnapshotMocks(MockResourceList{mockResource1a, mockResource1b, mockResource2a, mockResource2b}, nil) - - err = mockResourceClient.Delete(mockResource2a.GetMetadata().Namespace, mockResource2a.GetMetadata().Name, clients.DeleteOpts{Ctx: ctx}) - 
Expect(err).NotTo(HaveOccurred()) - err = mockResourceClient.Delete(mockResource2b.GetMetadata().Namespace, mockResource2b.GetMetadata().Name, clients.DeleteOpts{Ctx: ctx}) - Expect(err).NotTo(HaveOccurred()) - - assertSnapshotMocks(MockResourceList{mockResource1a, mockResource1b}, MockResourceList{mockResource2a, mockResource2b}) - - err = mockResourceClient.Delete(mockResource1a.GetMetadata().Namespace, mockResource1a.GetMetadata().Name, clients.DeleteOpts{Ctx: ctx}) - Expect(err).NotTo(HaveOccurred()) - err = mockResourceClient.Delete(mockResource1b.GetMetadata().Namespace, mockResource1b.GetMetadata().Name, clients.DeleteOpts{Ctx: ctx}) - Expect(err).NotTo(HaveOccurred()) - - assertSnapshotMocks(nil, MockResourceList{mockResource1a, mockResource1b, mockResource2a, mockResource2b}) - }) -}) diff --git a/test/mocks/v2alpha1/mock_resources.pb.go b/test/mocks/v2alpha1/mock_resources.pb.go index 144b025db..7c48754fa 100644 --- a/test/mocks/v2alpha1/mock_resources.pb.go +++ b/test/mocks/v2alpha1/mock_resources.pb.go @@ -6,11 +6,10 @@ package v2alpha1 import ( bytes "bytes" fmt "fmt" - math "math" - _ "github.com/gogo/protobuf/gogoproto" proto "github.com/gogo/protobuf/proto" core "github.com/solo-io/solo-kit/pkg/api/v1/resources/core" + math "math" ) // Reference imports to suppress errors if they are not otherwise used. 
diff --git a/test/mocks/v2alpha1/testing.solo.io_suite_test.go b/test/mocks/v2alpha1/testing.solo.io_suite_test.go index 358919f9b..2d6a94828 100644 --- a/test/mocks/v2alpha1/testing.solo.io_suite_test.go +++ b/test/mocks/v2alpha1/testing.solo.io_suite_test.go @@ -36,13 +36,13 @@ var ( Expect(err).NotTo(HaveOccurred()) clientset, err := apiexts.NewForConfig(cfg) Expect(err).NotTo(HaveOccurred()) - err = clientset.ApiextensionsV1beta1().CustomResourceDefinitions().Delete("anothermockresources.testing.solo.io", &metav1.DeleteOptions{}) - testutils.ErrorNotOccuredOrNotFound(err) - err = clientset.ApiextensionsV1beta1().CustomResourceDefinitions().Delete("clusterresources.testing.solo.io", &metav1.DeleteOptions{}) + err = clientset.ApiextensionsV1beta1().CustomResourceDefinitions().Delete("mocks.testing.solo.io", &metav1.DeleteOptions{}) testutils.ErrorNotOccuredOrNotFound(err) err = clientset.ApiextensionsV1beta1().CustomResourceDefinitions().Delete("fakes.testing.solo.io", &metav1.DeleteOptions{}) testutils.ErrorNotOccuredOrNotFound(err) - err = clientset.ApiextensionsV1beta1().CustomResourceDefinitions().Delete("mocks.testing.solo.io", &metav1.DeleteOptions{}) + err = clientset.ApiextensionsV1beta1().CustomResourceDefinitions().Delete("anothermockresources.testing.solo.io", &metav1.DeleteOptions{}) + testutils.ErrorNotOccuredOrNotFound(err) + err = clientset.ApiextensionsV1beta1().CustomResourceDefinitions().Delete("clusterresources.testing.solo.io", &metav1.DeleteOptions{}) testutils.ErrorNotOccuredOrNotFound(err) Expect(lock.ReleaseLock()).NotTo(HaveOccurred()) }) diff --git a/test/mocks/v2alpha1/testing_event_loop.sk.go b/test/mocks/v2alpha1/testing_event_loop.sk.go deleted file mode 100644 index 4d76c8e71..000000000 --- a/test/mocks/v2alpha1/testing_event_loop.sk.go +++ /dev/null @@ -1,93 +0,0 @@ -// Code generated by solo-kit. DO NOT EDIT. 
- -package v2alpha1 - -import ( - "context" - - "go.opencensus.io/trace" - - "github.com/hashicorp/go-multierror" - - "github.com/solo-io/go-utils/contextutils" - "github.com/solo-io/go-utils/errutils" - "github.com/solo-io/solo-kit/pkg/api/v1/clients" - "github.com/solo-io/solo-kit/pkg/api/v1/eventloop" - "github.com/solo-io/solo-kit/pkg/errors" -) - -type TestingSyncer interface { - Sync(context.Context, *TestingSnapshot) error -} - -type TestingSyncers []TestingSyncer - -func (s TestingSyncers) Sync(ctx context.Context, snapshot *TestingSnapshot) error { - var multiErr *multierror.Error - for _, syncer := range s { - if err := syncer.Sync(ctx, snapshot); err != nil { - multiErr = multierror.Append(multiErr, err) - } - } - return multiErr.ErrorOrNil() -} - -type testingEventLoop struct { - emitter TestingEmitter - syncer TestingSyncer -} - -func NewTestingEventLoop(emitter TestingEmitter, syncer TestingSyncer) eventloop.EventLoop { - return &testingEventLoop{ - emitter: emitter, - syncer: syncer, - } -} - -func (el *testingEventLoop) Run(namespaces []string, opts clients.WatchOpts) (<-chan error, error) { - opts = opts.WithDefaults() - opts.Ctx = contextutils.WithLogger(opts.Ctx, "v2alpha1.event_loop") - logger := contextutils.LoggerFrom(opts.Ctx) - logger.Infof("event loop started") - - errs := make(chan error) - - watch, emitterErrs, err := el.emitter.Snapshots(namespaces, opts) - if err != nil { - return nil, errors.Wrapf(err, "starting snapshot watch") - } - go errutils.AggregateErrs(opts.Ctx, errs, emitterErrs, "v2alpha1.emitter errors") - go func() { - // create a new context for each loop, cancel it before each loop - var cancel context.CancelFunc = func() {} - // use closure to allow cancel function to be updated as context changes - defer func() { cancel() }() - for { - select { - case snapshot, ok := <-watch: - if !ok { - return - } - // cancel any open watches from previous loop - cancel() - - ctx, span := trace.StartSpan(opts.Ctx, 
"testing.solo.io.EventLoopSync") - ctx, canc := context.WithCancel(ctx) - cancel = canc - err := el.syncer.Sync(ctx, snapshot) - span.End() - - if err != nil { - select { - case errs <- err: - default: - logger.Errorf("write error channel is full! could not propagate err: %v", err) - } - } - case <-opts.Ctx.Done(): - return - } - } - }() - return errs, nil -} diff --git a/test/mocks/v2alpha1/testing_event_loop_test.go b/test/mocks/v2alpha1/testing_event_loop_test.go deleted file mode 100644 index e5bee236e..000000000 --- a/test/mocks/v2alpha1/testing_event_loop_test.go +++ /dev/null @@ -1,73 +0,0 @@ -// Code generated by solo-kit. DO NOT EDIT. - -// +build solokit - -package v2alpha1 - -import ( - "context" - "sync" - "time" - - testing_solo_io "github.com/solo-io/solo-kit/test/mocks/v1" - - . "github.com/onsi/ginkgo" - . "github.com/onsi/gomega" - "github.com/solo-io/solo-kit/pkg/api/v1/clients" - "github.com/solo-io/solo-kit/pkg/api/v1/clients/factory" - "github.com/solo-io/solo-kit/pkg/api/v1/clients/memory" -) - -var _ = Describe("TestingEventLoop", func() { - var ( - namespace string - emitter TestingEmitter - err error - ) - - BeforeEach(func() { - - mockResourceClientFactory := &factory.MemoryResourceClientFactory{ - Cache: memory.NewInMemoryResourceCache(), - } - mockResourceClient, err := NewMockResourceClient(mockResourceClientFactory) - Expect(err).NotTo(HaveOccurred()) - - fakeResourceClientFactory := &factory.MemoryResourceClientFactory{ - Cache: memory.NewInMemoryResourceCache(), - } - fakeResourceClient, err := testing_solo_io.NewFakeResourceClient(fakeResourceClientFactory) - Expect(err).NotTo(HaveOccurred()) - - emitter = NewTestingEmitter(mockResourceClient, fakeResourceClient) - }) - It("runs sync function on a new snapshot", func() { - _, err = emitter.MockResource().Write(NewMockResource(namespace, "jerry"), clients.WriteOpts{}) - Expect(err).NotTo(HaveOccurred()) - _, err = 
emitter.FakeResource().Write(testing_solo_io.NewFakeResource(namespace, "jerry"), clients.WriteOpts{}) - Expect(err).NotTo(HaveOccurred()) - sync := &mockTestingSyncer{} - el := NewTestingEventLoop(emitter, sync) - _, err := el.Run([]string{namespace}, clients.WatchOpts{}) - Expect(err).NotTo(HaveOccurred()) - Eventually(sync.Synced, 5*time.Second).Should(BeTrue()) - }) -}) - -type mockTestingSyncer struct { - synced bool - mutex sync.Mutex -} - -func (s *mockTestingSyncer) Synced() bool { - s.mutex.Lock() - defer s.mutex.Unlock() - return s.synced -} - -func (s *mockTestingSyncer) Sync(ctx context.Context, snap *TestingSnapshot) error { - s.mutex.Lock() - s.synced = true - s.mutex.Unlock() - return nil -} diff --git a/test/mocks/v2alpha1/testing_snapshot.sk.go b/test/mocks/v2alpha1/testing_snapshot.sk.go deleted file mode 100644 index 7c4113b46..000000000 --- a/test/mocks/v2alpha1/testing_snapshot.sk.go +++ /dev/null @@ -1,77 +0,0 @@ -// Code generated by solo-kit. DO NOT EDIT. - -package v2alpha1 - -import ( - "fmt" - - testing_solo_io "github.com/solo-io/solo-kit/test/mocks/v1" - - "github.com/solo-io/go-utils/hashutils" - "go.uber.org/zap" -) - -type TestingSnapshot struct { - Mocks MockResourceList - Fakes testing_solo_io.FakeResourceList -} - -func (s TestingSnapshot) Clone() TestingSnapshot { - return TestingSnapshot{ - Mocks: s.Mocks.Clone(), - Fakes: s.Fakes.Clone(), - } -} - -func (s TestingSnapshot) Hash() uint64 { - return hashutils.HashAll( - s.hashMocks(), - s.hashFakes(), - ) -} - -func (s TestingSnapshot) hashMocks() uint64 { - return hashutils.HashAll(s.Mocks.AsInterfaces()...) -} - -func (s TestingSnapshot) hashFakes() uint64 { - return hashutils.HashAll(s.Fakes.AsInterfaces()...) 
-} - -func (s TestingSnapshot) HashFields() []zap.Field { - var fields []zap.Field - fields = append(fields, zap.Uint64("mocks", s.hashMocks())) - fields = append(fields, zap.Uint64("fakes", s.hashFakes())) - - return append(fields, zap.Uint64("snapshotHash", s.Hash())) -} - -type TestingSnapshotStringer struct { - Version uint64 - Mocks []string - Fakes []string -} - -func (ss TestingSnapshotStringer) String() string { - s := fmt.Sprintf("TestingSnapshot %v\n", ss.Version) - - s += fmt.Sprintf(" Mocks %v\n", len(ss.Mocks)) - for _, name := range ss.Mocks { - s += fmt.Sprintf(" %v\n", name) - } - - s += fmt.Sprintf(" Fakes %v\n", len(ss.Fakes)) - for _, name := range ss.Fakes { - s += fmt.Sprintf(" %v\n", name) - } - - return s -} - -func (s TestingSnapshot) Stringer() TestingSnapshotStringer { - return TestingSnapshotStringer{ - Version: s.Hash(), - Mocks: s.Mocks.NamespacesDotNames(), - Fakes: s.Fakes.NamespacesDotNames(), - } -} diff --git a/test/mocks/v2alpha1/testing_snapshot_emitter.sk.go b/test/mocks/v2alpha1/testing_snapshot_emitter.sk.go deleted file mode 100644 index d45751ec8..000000000 --- a/test/mocks/v2alpha1/testing_snapshot_emitter.sk.go +++ /dev/null @@ -1,223 +0,0 @@ -// Code generated by solo-kit. DO NOT EDIT. 
- -package v2alpha1 - -import ( - "sync" - "time" - - testing_solo_io "github.com/solo-io/solo-kit/test/mocks/v1" - - "go.opencensus.io/stats" - "go.opencensus.io/stats/view" - "go.opencensus.io/tag" - - "github.com/solo-io/go-utils/errutils" - "github.com/solo-io/solo-kit/pkg/api/v1/clients" - "github.com/solo-io/solo-kit/pkg/errors" -) - -var ( - mTestingSnapshotIn = stats.Int64("testing.solo.io/snap_emitter/snap_in", "The number of snapshots in", "1") - mTestingSnapshotOut = stats.Int64("testing.solo.io/snap_emitter/snap_out", "The number of snapshots out", "1") - - testingsnapshotInView = &view.View{ - Name: "testing.solo.io_snap_emitter/snap_in", - Measure: mTestingSnapshotIn, - Description: "The number of snapshots updates coming in", - Aggregation: view.Count(), - TagKeys: []tag.Key{}, - } - testingsnapshotOutView = &view.View{ - Name: "testing.solo.io/snap_emitter/snap_out", - Measure: mTestingSnapshotOut, - Description: "The number of snapshots updates going out", - Aggregation: view.Count(), - TagKeys: []tag.Key{}, - } -) - -func init() { - view.Register(testingsnapshotInView, testingsnapshotOutView) -} - -type TestingEmitter interface { - Register() error - MockResource() MockResourceClient - FakeResource() testing_solo_io.FakeResourceClient - Snapshots(watchNamespaces []string, opts clients.WatchOpts) (<-chan *TestingSnapshot, <-chan error, error) -} - -func NewTestingEmitter(mockResourceClient MockResourceClient, fakeResourceClient testing_solo_io.FakeResourceClient) TestingEmitter { - return NewTestingEmitterWithEmit(mockResourceClient, fakeResourceClient, make(chan struct{})) -} - -func NewTestingEmitterWithEmit(mockResourceClient MockResourceClient, fakeResourceClient testing_solo_io.FakeResourceClient, emit <-chan struct{}) TestingEmitter { - return &testingEmitter{ - mockResource: mockResourceClient, - fakeResource: fakeResourceClient, - forceEmit: emit, - } -} - -type testingEmitter struct { - forceEmit <-chan struct{} - mockResource 
MockResourceClient - fakeResource testing_solo_io.FakeResourceClient -} - -func (c *testingEmitter) Register() error { - if err := c.mockResource.Register(); err != nil { - return err - } - if err := c.fakeResource.Register(); err != nil { - return err - } - return nil -} - -func (c *testingEmitter) MockResource() MockResourceClient { - return c.mockResource -} - -func (c *testingEmitter) FakeResource() testing_solo_io.FakeResourceClient { - return c.fakeResource -} - -func (c *testingEmitter) Snapshots(watchNamespaces []string, opts clients.WatchOpts) (<-chan *TestingSnapshot, <-chan error, error) { - - if len(watchNamespaces) == 0 { - watchNamespaces = []string{""} - } - - for _, ns := range watchNamespaces { - if ns == "" && len(watchNamespaces) > 1 { - return nil, nil, errors.Errorf("the \"\" namespace is used to watch all namespaces. Snapshots can either be tracked for " + - "specific namespaces or \"\" AllNamespaces, but not both.") - } - } - - errs := make(chan error) - var done sync.WaitGroup - ctx := opts.Ctx - /* Create channel for MockResource */ - type mockResourceListWithNamespace struct { - list MockResourceList - namespace string - } - mockResourceChan := make(chan mockResourceListWithNamespace) - /* Create channel for FakeResource */ - type fakeResourceListWithNamespace struct { - list testing_solo_io.FakeResourceList - namespace string - } - fakeResourceChan := make(chan fakeResourceListWithNamespace) - - for _, namespace := range watchNamespaces { - /* Setup namespaced watch for MockResource */ - mockResourceNamespacesChan, mockResourceErrs, err := c.mockResource.Watch(namespace, opts) - if err != nil { - return nil, nil, errors.Wrapf(err, "starting MockResource watch") - } - - done.Add(1) - go func(namespace string) { - defer done.Done() - errutils.AggregateErrs(ctx, errs, mockResourceErrs, namespace+"-mocks") - }(namespace) - /* Setup namespaced watch for FakeResource */ - fakeResourceNamespacesChan, fakeResourceErrs, err := 
c.fakeResource.Watch(namespace, opts) - if err != nil { - return nil, nil, errors.Wrapf(err, "starting FakeResource watch") - } - - done.Add(1) - go func(namespace string) { - defer done.Done() - errutils.AggregateErrs(ctx, errs, fakeResourceErrs, namespace+"-fakes") - }(namespace) - - /* Watch for changes and update snapshot */ - go func(namespace string) { - for { - select { - case <-ctx.Done(): - return - case mockResourceList := <-mockResourceNamespacesChan: - select { - case <-ctx.Done(): - return - case mockResourceChan <- mockResourceListWithNamespace{list: mockResourceList, namespace: namespace}: - } - case fakeResourceList := <-fakeResourceNamespacesChan: - select { - case <-ctx.Done(): - return - case fakeResourceChan <- fakeResourceListWithNamespace{list: fakeResourceList, namespace: namespace}: - } - } - } - }(namespace) - } - - snapshots := make(chan *TestingSnapshot) - go func() { - originalSnapshot := TestingSnapshot{} - currentSnapshot := originalSnapshot.Clone() - timer := time.NewTicker(time.Second * 1) - sync := func() { - if originalSnapshot.Hash() == currentSnapshot.Hash() { - return - } - - stats.Record(ctx, mTestingSnapshotOut.M(1)) - originalSnapshot = currentSnapshot.Clone() - sentSnapshot := currentSnapshot.Clone() - snapshots <- &sentSnapshot - } - mocksByNamespace := make(map[string]MockResourceList) - fakesByNamespace := make(map[string]testing_solo_io.FakeResourceList) - - for { - record := func() { stats.Record(ctx, mTestingSnapshotIn.M(1)) } - - select { - case <-timer.C: - sync() - case <-ctx.Done(): - close(snapshots) - done.Wait() - close(errs) - return - case <-c.forceEmit: - sentSnapshot := currentSnapshot.Clone() - snapshots <- &sentSnapshot - case mockResourceNamespacedList := <-mockResourceChan: - record() - - namespace := mockResourceNamespacedList.namespace - - // merge lists by namespace - mocksByNamespace[namespace] = mockResourceNamespacedList.list - var mockResourceList MockResourceList - for _, mocks := range 
mocksByNamespace { - mockResourceList = append(mockResourceList, mocks...) - } - currentSnapshot.Mocks = mockResourceList.Sort() - case fakeResourceNamespacedList := <-fakeResourceChan: - record() - - namespace := fakeResourceNamespacedList.namespace - - // merge lists by namespace - fakesByNamespace[namespace] = fakeResourceNamespacedList.list - var fakeResourceList testing_solo_io.FakeResourceList - for _, fakes := range fakesByNamespace { - fakeResourceList = append(fakeResourceList, fakes...) - } - currentSnapshot.Fakes = fakeResourceList.Sort() - } - } - }() - return snapshots, errs, nil -} diff --git a/test/mocks/v2alpha1/testing_snapshot_emitter_test.go b/test/mocks/v2alpha1/testing_snapshot_emitter_test.go deleted file mode 100644 index 3e2975f24..000000000 --- a/test/mocks/v2alpha1/testing_snapshot_emitter_test.go +++ /dev/null @@ -1,333 +0,0 @@ -// Code generated by solo-kit. DO NOT EDIT. - -// +build solokit - -package v2alpha1 - -import ( - "context" - "os" - "time" - - testing_solo_io "github.com/solo-io/solo-kit/test/mocks/v1" - - . "github.com/onsi/ginkgo" - . "github.com/onsi/gomega" - "github.com/solo-io/go-utils/kubeutils" - "github.com/solo-io/go-utils/log" - "github.com/solo-io/solo-kit/pkg/api/v1/clients" - "github.com/solo-io/solo-kit/pkg/api/v1/clients/factory" - kuberc "github.com/solo-io/solo-kit/pkg/api/v1/clients/kube" - "github.com/solo-io/solo-kit/pkg/api/v1/clients/memory" - "github.com/solo-io/solo-kit/test/helpers" - "k8s.io/client-go/kubernetes" - "k8s.io/client-go/rest" - - // Needed to run tests in GKE - _ "k8s.io/client-go/plugin/pkg/client/auth/gcp" - - // From https://github.com/kubernetes/client-go/blob/53c7adfd0294caa142d961e1f780f74081d5b15f/examples/out-of-cluster-client-configuration/main.go#L31 - _ "k8s.io/client-go/plugin/pkg/client/auth/gcp" -) - -var _ = Describe("V2Alpha1Emitter", func() { - if os.Getenv("RUN_KUBE_TESTS") != "1" { - log.Printf("This test creates kubernetes resources and is disabled by default. 
To enable, set RUN_KUBE_TESTS=1 in your env.") - return - } - var ( - namespace1 string - namespace2 string - name1, name2 = "angela" + helpers.RandString(3), "bob" + helpers.RandString(3) - cfg *rest.Config - kube kubernetes.Interface - emitter TestingEmitter - mockResourceClient MockResourceClient - fakeResourceClient testing_solo_io.FakeResourceClient - ) - - BeforeEach(func() { - namespace1 = helpers.RandString(8) - namespace2 = helpers.RandString(8) - kube = helpers.MustKubeClient() - err := kubeutils.CreateNamespacesInParallel(kube, namespace1, namespace2) - Expect(err).NotTo(HaveOccurred()) - cfg, err = kubeutils.GetConfig("", "") - Expect(err).NotTo(HaveOccurred()) - // MockResource Constructor - mockResourceClientFactory := &factory.KubeResourceClientFactory{ - Crd: MockResourceCrd, - Cfg: cfg, - SharedCache: kuberc.NewKubeCache(context.TODO()), - } - - mockResourceClient, err = NewMockResourceClient(mockResourceClientFactory) - Expect(err).NotTo(HaveOccurred()) - // FakeResource Constructor - fakeResourceClientFactory := &factory.MemoryResourceClientFactory{ - Cache: memory.NewInMemoryResourceCache(), - } - - fakeResourceClient, err = testing_solo_io.NewFakeResourceClient(fakeResourceClientFactory) - Expect(err).NotTo(HaveOccurred()) - emitter = NewTestingEmitter(mockResourceClient, fakeResourceClient) - }) - AfterEach(func() { - err := kubeutils.DeleteNamespacesInParallelBlocking(kube, namespace1, namespace2) - Expect(err).NotTo(HaveOccurred()) - }) - It("tracks snapshots on changes to any resource", func() { - ctx := context.Background() - err := emitter.Register() - Expect(err).NotTo(HaveOccurred()) - - snapshots, errs, err := emitter.Snapshots([]string{namespace1, namespace2}, clients.WatchOpts{ - Ctx: ctx, - RefreshRate: time.Second, - }) - Expect(err).NotTo(HaveOccurred()) - - var snap *TestingSnapshot - - /* - MockResource - */ - - assertSnapshotMocks := func(expectMocks MockResourceList, unexpectMocks MockResourceList) { - drain: - for { - select 
{ - case snap = <-snapshots: - for _, expected := range expectMocks { - if _, err := snap.Mocks.Find(expected.GetMetadata().Ref().Strings()); err != nil { - continue drain - } - } - for _, unexpected := range unexpectMocks { - if _, err := snap.Mocks.Find(unexpected.GetMetadata().Ref().Strings()); err == nil { - continue drain - } - } - break drain - case err := <-errs: - Expect(err).NotTo(HaveOccurred()) - case <-time.After(time.Second * 10): - nsList1, _ := mockResourceClient.List(namespace1, clients.ListOpts{}) - nsList2, _ := mockResourceClient.List(namespace2, clients.ListOpts{}) - combined := append(nsList1, nsList2...) - Fail("expected final snapshot before 10 seconds. expected " + log.Sprintf("%v", combined)) - } - } - } - mockResource1a, err := mockResourceClient.Write(NewMockResource(namespace1, name1), clients.WriteOpts{Ctx: ctx}) - Expect(err).NotTo(HaveOccurred()) - mockResource1b, err := mockResourceClient.Write(NewMockResource(namespace2, name1), clients.WriteOpts{Ctx: ctx}) - Expect(err).NotTo(HaveOccurred()) - - assertSnapshotMocks(MockResourceList{mockResource1a, mockResource1b}, nil) - mockResource2a, err := mockResourceClient.Write(NewMockResource(namespace1, name2), clients.WriteOpts{Ctx: ctx}) - Expect(err).NotTo(HaveOccurred()) - mockResource2b, err := mockResourceClient.Write(NewMockResource(namespace2, name2), clients.WriteOpts{Ctx: ctx}) - Expect(err).NotTo(HaveOccurred()) - - assertSnapshotMocks(MockResourceList{mockResource1a, mockResource1b, mockResource2a, mockResource2b}, nil) - - err = mockResourceClient.Delete(mockResource2a.GetMetadata().Namespace, mockResource2a.GetMetadata().Name, clients.DeleteOpts{Ctx: ctx}) - Expect(err).NotTo(HaveOccurred()) - err = mockResourceClient.Delete(mockResource2b.GetMetadata().Namespace, mockResource2b.GetMetadata().Name, clients.DeleteOpts{Ctx: ctx}) - Expect(err).NotTo(HaveOccurred()) - - assertSnapshotMocks(MockResourceList{mockResource1a, mockResource1b}, MockResourceList{mockResource2a, 
mockResource2b}) - - err = mockResourceClient.Delete(mockResource1a.GetMetadata().Namespace, mockResource1a.GetMetadata().Name, clients.DeleteOpts{Ctx: ctx}) - Expect(err).NotTo(HaveOccurred()) - err = mockResourceClient.Delete(mockResource1b.GetMetadata().Namespace, mockResource1b.GetMetadata().Name, clients.DeleteOpts{Ctx: ctx}) - Expect(err).NotTo(HaveOccurred()) - - assertSnapshotMocks(nil, MockResourceList{mockResource1a, mockResource1b, mockResource2a, mockResource2b}) - - /* - FakeResource - */ - - assertSnapshotFakes := func(expectFakes testing_solo_io.FakeResourceList, unexpectFakes testing_solo_io.FakeResourceList) { - drain: - for { - select { - case snap = <-snapshots: - for _, expected := range expectFakes { - if _, err := snap.Fakes.Find(expected.GetMetadata().Ref().Strings()); err != nil { - continue drain - } - } - for _, unexpected := range unexpectFakes { - if _, err := snap.Fakes.Find(unexpected.GetMetadata().Ref().Strings()); err == nil { - continue drain - } - } - break drain - case err := <-errs: - Expect(err).NotTo(HaveOccurred()) - case <-time.After(time.Second * 10): - nsList1, _ := fakeResourceClient.List(namespace1, clients.ListOpts{}) - nsList2, _ := fakeResourceClient.List(namespace2, clients.ListOpts{}) - combined := append(nsList1, nsList2...) - Fail("expected final snapshot before 10 seconds. 
expected " + log.Sprintf("%v", combined)) - } - } - } - fakeResource1a, err := fakeResourceClient.Write(testing_solo_io.NewFakeResource(namespace1, name1), clients.WriteOpts{Ctx: ctx}) - Expect(err).NotTo(HaveOccurred()) - fakeResource1b, err := fakeResourceClient.Write(testing_solo_io.NewFakeResource(namespace2, name1), clients.WriteOpts{Ctx: ctx}) - Expect(err).NotTo(HaveOccurred()) - - assertSnapshotFakes(testing_solo_io.FakeResourceList{fakeResource1a, fakeResource1b}, nil) - fakeResource2a, err := fakeResourceClient.Write(testing_solo_io.NewFakeResource(namespace1, name2), clients.WriteOpts{Ctx: ctx}) - Expect(err).NotTo(HaveOccurred()) - fakeResource2b, err := fakeResourceClient.Write(testing_solo_io.NewFakeResource(namespace2, name2), clients.WriteOpts{Ctx: ctx}) - Expect(err).NotTo(HaveOccurred()) - - assertSnapshotFakes(testing_solo_io.FakeResourceList{fakeResource1a, fakeResource1b, fakeResource2a, fakeResource2b}, nil) - - err = fakeResourceClient.Delete(fakeResource2a.GetMetadata().Namespace, fakeResource2a.GetMetadata().Name, clients.DeleteOpts{Ctx: ctx}) - Expect(err).NotTo(HaveOccurred()) - err = fakeResourceClient.Delete(fakeResource2b.GetMetadata().Namespace, fakeResource2b.GetMetadata().Name, clients.DeleteOpts{Ctx: ctx}) - Expect(err).NotTo(HaveOccurred()) - - assertSnapshotFakes(testing_solo_io.FakeResourceList{fakeResource1a, fakeResource1b}, testing_solo_io.FakeResourceList{fakeResource2a, fakeResource2b}) - - err = fakeResourceClient.Delete(fakeResource1a.GetMetadata().Namespace, fakeResource1a.GetMetadata().Name, clients.DeleteOpts{Ctx: ctx}) - Expect(err).NotTo(HaveOccurred()) - err = fakeResourceClient.Delete(fakeResource1b.GetMetadata().Namespace, fakeResource1b.GetMetadata().Name, clients.DeleteOpts{Ctx: ctx}) - Expect(err).NotTo(HaveOccurred()) - - assertSnapshotFakes(nil, testing_solo_io.FakeResourceList{fakeResource1a, fakeResource1b, fakeResource2a, fakeResource2b}) - }) - It("tracks snapshots on changes to any resource using 
AllNamespace", func() { - ctx := context.Background() - err := emitter.Register() - Expect(err).NotTo(HaveOccurred()) - - snapshots, errs, err := emitter.Snapshots([]string{""}, clients.WatchOpts{ - Ctx: ctx, - RefreshRate: time.Second, - }) - Expect(err).NotTo(HaveOccurred()) - - var snap *TestingSnapshot - - /* - MockResource - */ - - assertSnapshotMocks := func(expectMocks MockResourceList, unexpectMocks MockResourceList) { - drain: - for { - select { - case snap = <-snapshots: - for _, expected := range expectMocks { - if _, err := snap.Mocks.Find(expected.GetMetadata().Ref().Strings()); err != nil { - continue drain - } - } - for _, unexpected := range unexpectMocks { - if _, err := snap.Mocks.Find(unexpected.GetMetadata().Ref().Strings()); err == nil { - continue drain - } - } - break drain - case err := <-errs: - Expect(err).NotTo(HaveOccurred()) - case <-time.After(time.Second * 10): - nsList1, _ := mockResourceClient.List(namespace1, clients.ListOpts{}) - nsList2, _ := mockResourceClient.List(namespace2, clients.ListOpts{}) - combined := append(nsList1, nsList2...) - Fail("expected final snapshot before 10 seconds. 
expected " + log.Sprintf("%v", combined)) - } - } - } - mockResource1a, err := mockResourceClient.Write(NewMockResource(namespace1, name1), clients.WriteOpts{Ctx: ctx}) - Expect(err).NotTo(HaveOccurred()) - mockResource1b, err := mockResourceClient.Write(NewMockResource(namespace2, name1), clients.WriteOpts{Ctx: ctx}) - Expect(err).NotTo(HaveOccurred()) - - assertSnapshotMocks(MockResourceList{mockResource1a, mockResource1b}, nil) - mockResource2a, err := mockResourceClient.Write(NewMockResource(namespace1, name2), clients.WriteOpts{Ctx: ctx}) - Expect(err).NotTo(HaveOccurred()) - mockResource2b, err := mockResourceClient.Write(NewMockResource(namespace2, name2), clients.WriteOpts{Ctx: ctx}) - Expect(err).NotTo(HaveOccurred()) - - assertSnapshotMocks(MockResourceList{mockResource1a, mockResource1b, mockResource2a, mockResource2b}, nil) - - err = mockResourceClient.Delete(mockResource2a.GetMetadata().Namespace, mockResource2a.GetMetadata().Name, clients.DeleteOpts{Ctx: ctx}) - Expect(err).NotTo(HaveOccurred()) - err = mockResourceClient.Delete(mockResource2b.GetMetadata().Namespace, mockResource2b.GetMetadata().Name, clients.DeleteOpts{Ctx: ctx}) - Expect(err).NotTo(HaveOccurred()) - - assertSnapshotMocks(MockResourceList{mockResource1a, mockResource1b}, MockResourceList{mockResource2a, mockResource2b}) - - err = mockResourceClient.Delete(mockResource1a.GetMetadata().Namespace, mockResource1a.GetMetadata().Name, clients.DeleteOpts{Ctx: ctx}) - Expect(err).NotTo(HaveOccurred()) - err = mockResourceClient.Delete(mockResource1b.GetMetadata().Namespace, mockResource1b.GetMetadata().Name, clients.DeleteOpts{Ctx: ctx}) - Expect(err).NotTo(HaveOccurred()) - - assertSnapshotMocks(nil, MockResourceList{mockResource1a, mockResource1b, mockResource2a, mockResource2b}) - - /* - FakeResource - */ - - assertSnapshotFakes := func(expectFakes testing_solo_io.FakeResourceList, unexpectFakes testing_solo_io.FakeResourceList) { - drain: - for { - select { - case snap = <-snapshots: - 
for _, expected := range expectFakes { - if _, err := snap.Fakes.Find(expected.GetMetadata().Ref().Strings()); err != nil { - continue drain - } - } - for _, unexpected := range unexpectFakes { - if _, err := snap.Fakes.Find(unexpected.GetMetadata().Ref().Strings()); err == nil { - continue drain - } - } - break drain - case err := <-errs: - Expect(err).NotTo(HaveOccurred()) - case <-time.After(time.Second * 10): - nsList1, _ := fakeResourceClient.List(namespace1, clients.ListOpts{}) - nsList2, _ := fakeResourceClient.List(namespace2, clients.ListOpts{}) - combined := append(nsList1, nsList2...) - Fail("expected final snapshot before 10 seconds. expected " + log.Sprintf("%v", combined)) - } - } - } - fakeResource1a, err := fakeResourceClient.Write(testing_solo_io.NewFakeResource(namespace1, name1), clients.WriteOpts{Ctx: ctx}) - Expect(err).NotTo(HaveOccurred()) - fakeResource1b, err := fakeResourceClient.Write(testing_solo_io.NewFakeResource(namespace2, name1), clients.WriteOpts{Ctx: ctx}) - Expect(err).NotTo(HaveOccurred()) - - assertSnapshotFakes(testing_solo_io.FakeResourceList{fakeResource1a, fakeResource1b}, nil) - fakeResource2a, err := fakeResourceClient.Write(testing_solo_io.NewFakeResource(namespace1, name2), clients.WriteOpts{Ctx: ctx}) - Expect(err).NotTo(HaveOccurred()) - fakeResource2b, err := fakeResourceClient.Write(testing_solo_io.NewFakeResource(namespace2, name2), clients.WriteOpts{Ctx: ctx}) - Expect(err).NotTo(HaveOccurred()) - - assertSnapshotFakes(testing_solo_io.FakeResourceList{fakeResource1a, fakeResource1b, fakeResource2a, fakeResource2b}, nil) - - err = fakeResourceClient.Delete(fakeResource2a.GetMetadata().Namespace, fakeResource2a.GetMetadata().Name, clients.DeleteOpts{Ctx: ctx}) - Expect(err).NotTo(HaveOccurred()) - err = fakeResourceClient.Delete(fakeResource2b.GetMetadata().Namespace, fakeResource2b.GetMetadata().Name, clients.DeleteOpts{Ctx: ctx}) - Expect(err).NotTo(HaveOccurred()) - - 
assertSnapshotFakes(testing_solo_io.FakeResourceList{fakeResource1a, fakeResource1b}, testing_solo_io.FakeResourceList{fakeResource2a, fakeResource2b}) - - err = fakeResourceClient.Delete(fakeResource1a.GetMetadata().Namespace, fakeResource1a.GetMetadata().Name, clients.DeleteOpts{Ctx: ctx}) - Expect(err).NotTo(HaveOccurred()) - err = fakeResourceClient.Delete(fakeResource1b.GetMetadata().Namespace, fakeResource1b.GetMetadata().Name, clients.DeleteOpts{Ctx: ctx}) - Expect(err).NotTo(HaveOccurred()) - - assertSnapshotFakes(nil, testing_solo_io.FakeResourceList{fakeResource1a, fakeResource1b, fakeResource2a, fakeResource2b}) - }) -}) diff --git a/test/mocks/v2alpha1/testing_snapshot_simple_emitter.sk.go b/test/mocks/v2alpha1/testing_snapshot_simple_emitter.sk.go deleted file mode 100644 index 7c0b9e463..000000000 --- a/test/mocks/v2alpha1/testing_snapshot_simple_emitter.sk.go +++ /dev/null @@ -1,104 +0,0 @@ -// Code generated by solo-kit. DO NOT EDIT. - -package v2alpha1 - -import ( - "context" - fmt "fmt" - "time" - - testing_solo_io "github.com/solo-io/solo-kit/test/mocks/v1" - - "go.opencensus.io/stats" - - "github.com/solo-io/go-utils/errutils" - "github.com/solo-io/solo-kit/pkg/api/v1/clients" -) - -type TestingSimpleEmitter interface { - Snapshots(ctx context.Context) (<-chan *TestingSnapshot, <-chan error, error) -} - -func NewTestingSimpleEmitter(aggregatedWatch clients.ResourceWatch) TestingSimpleEmitter { - return NewTestingSimpleEmitterWithEmit(aggregatedWatch, make(chan struct{})) -} - -func NewTestingSimpleEmitterWithEmit(aggregatedWatch clients.ResourceWatch, emit <-chan struct{}) TestingSimpleEmitter { - return &testingSimpleEmitter{ - aggregatedWatch: aggregatedWatch, - forceEmit: emit, - } -} - -type testingSimpleEmitter struct { - forceEmit <-chan struct{} - aggregatedWatch clients.ResourceWatch -} - -func (c *testingSimpleEmitter) Snapshots(ctx context.Context) (<-chan *TestingSnapshot, <-chan error, error) { - snapshots := make(chan 
*TestingSnapshot) - errs := make(chan error) - - untyped, watchErrs, err := c.aggregatedWatch(ctx) - if err != nil { - return nil, nil, err - } - - go errutils.AggregateErrs(ctx, errs, watchErrs, "testing-emitter") - - go func() { - originalSnapshot := TestingSnapshot{} - currentSnapshot := originalSnapshot.Clone() - timer := time.NewTicker(time.Second * 1) - sync := func() { - if originalSnapshot.Hash() == currentSnapshot.Hash() { - return - } - - stats.Record(ctx, mTestingSnapshotOut.M(1)) - originalSnapshot = currentSnapshot.Clone() - sentSnapshot := currentSnapshot.Clone() - snapshots <- &sentSnapshot - } - - defer func() { - close(snapshots) - close(errs) - }() - - for { - record := func() { stats.Record(ctx, mTestingSnapshotIn.M(1)) } - - select { - case <-timer.C: - sync() - case <-ctx.Done(): - return - case <-c.forceEmit: - sentSnapshot := currentSnapshot.Clone() - snapshots <- &sentSnapshot - case untypedList := <-untyped: - record() - - currentSnapshot = TestingSnapshot{} - for _, res := range untypedList { - switch typed := res.(type) { - case *MockResource: - currentSnapshot.Mocks = append(currentSnapshot.Mocks, typed) - case *testing_solo_io.FakeResource: - currentSnapshot.Fakes = append(currentSnapshot.Fakes, typed) - default: - select { - case errs <- fmt.Errorf("TestingSnapshotEmitter "+ - "cannot process resource %v of type %T", res.GetMetadata().Ref(), res): - case <-ctx.Done(): - return - } - } - } - - } - } - }() - return snapshots, errs, nil -} From bf325a2d31c6ed479a36e5bb3aac66992ff6bab4 Mon Sep 17 00:00:00 2001 From: Joe Kelley Date: Tue, 16 Jul 2019 10:27:01 -0400 Subject: [PATCH 02/17] quick comment changes --- pkg/api/v1/clients/kube/crd/crd.go | 6 ------ pkg/code-generator/cmd/main.go | 2 +- 2 files changed, 1 insertion(+), 7 deletions(-) diff --git a/pkg/api/v1/clients/kube/crd/crd.go b/pkg/api/v1/clients/kube/crd/crd.go index 948e007a7..d113758a1 100644 --- a/pkg/api/v1/clients/kube/crd/crd.go +++ b/pkg/api/v1/clients/kube/crd/crd.go 
@@ -88,12 +88,6 @@ func NewCrd( if err := c.AddToScheme(scheme.Scheme); err != nil { log.Panicf("error while adding [%v] CRD to scheme: %v", c.FullName(), err) } - // if res, ok := objType.(resources.Resource); ok { - // c.Version.ProtoSpec = res - // } else { - // log.Panicf("error while creating crd for %v, must extend " + - // "resources.Resource interface", c.FullName()) - // } return c } diff --git a/pkg/code-generator/cmd/main.go b/pkg/code-generator/cmd/main.go index 8f5f8476b..da059806f 100644 --- a/pkg/code-generator/cmd/main.go +++ b/pkg/code-generator/cmd/main.go @@ -76,7 +76,7 @@ func Generate(opts GenerateOptions) error { return err } - // Creates a VersionConfig from each of the 'solo-kit.json' files + // Creates a SoloKitProject from each of the 'solo-kit.json' files // found in the directory tree rooted at 'absoluteRoot'. soloKitProjects, err := collectProjectsFromRoot(absoluteRoot, skipDirs) if err != nil { From ce59e2f26b61f92684bec110dc025abef84dc495 Mon Sep 17 00:00:00 2001 From: Joe Kelley Date: Tue, 16 Jul 2019 11:08:33 -0400 Subject: [PATCH 03/17] one api group per --- api/external/kubernetes/solo-kit.json | 98 +-- api/multicluster/v1/solo-kit.json | 48 +- pkg/code-generator/cmd/main.go | 198 ++--- pkg/code-generator/model/project.go | 24 +- test/mocks/api/solo-kit.json | 102 ++- .../google/protobuf/descriptor.proto.sk.md | 825 ++++++++++++++++++ 6 files changed, 1051 insertions(+), 244 deletions(-) create mode 100644 test/mocks/docs/github.com/solo-io/solo-kit/api/external/google/protobuf/descriptor.proto.sk.md diff --git a/api/external/kubernetes/solo-kit.json b/api/external/kubernetes/solo-kit.json index 1b77e5656..3126bd5f8 100644 --- a/api/external/kubernetes/solo-kit.json +++ b/api/external/kubernetes/solo-kit.json @@ -1,53 +1,51 @@ { "title": "solo-kit", - "api_groups": [ - { - "name": "kubernetes.solo.io", - "resource_group_go_package": "github.com/solo-io/solo-kit/api/external/kubernetes/group", - "version_configs": [ - { - 
"version": "kubernetes", - "go_package": "github.com/solo-io/solo-kit/pkg/api/v1/resources/common/kubernetes", - "custom_resources": [ - { - "package": "github.com/solo-io/solo-kit/api/external/kubernetes/pod", - "type": "Pod", - "plural_name": "pods", - "short_name": "p" - }, - { - "package": "github.com/solo-io/solo-kit/api/external/kubernetes/namespace", - "type": "KubeNamespace", - "plural_name": "kubenamespaces", - "short_name": "kn" - }, - { - "package": "github.com/solo-io/solo-kit/api/external/kubernetes/configmap", - "type": "ConfigMap", - "plural_name": "configmaps", - "short_name": "cm" - }, - { - "package": "github.com/solo-io/solo-kit/api/external/kubernetes/service", - "type": "Service", - "plural_name": "services", - "short_name": "svc" - }, - { - "package": "github.com/solo-io/solo-kit/api/external/kubernetes/deployment", - "type": "Deployment", - "plural_name": "deployments", - "short_name": "dp " - }, - { - "package": "github.com/solo-io/solo-kit/api/external/kubernetes/customresourcedefinition", - "type": "CustomResourceDefinition", - "plural_name": "customresourcedefinition", - "short_name": "crd " - } - ] - } - ] - } - ] + "api_group": { + "name": "kubernetes.solo.io", + "resource_group_go_package": "github.com/solo-io/solo-kit/api/external/kubernetes/group", + "version_configs": [ + { + "version": "kubernetes", + "go_package": "github.com/solo-io/solo-kit/pkg/api/v1/resources/common/kubernetes", + "custom_resources": [ + { + "package": "github.com/solo-io/solo-kit/api/external/kubernetes/pod", + "type": "Pod", + "plural_name": "pods", + "short_name": "p" + }, + { + "package": "github.com/solo-io/solo-kit/api/external/kubernetes/namespace", + "type": "KubeNamespace", + "plural_name": "kubenamespaces", + "short_name": "kn" + }, + { + "package": "github.com/solo-io/solo-kit/api/external/kubernetes/configmap", + "type": "ConfigMap", + "plural_name": "configmaps", + "short_name": "cm" + }, + { + "package": 
"github.com/solo-io/solo-kit/api/external/kubernetes/service", + "type": "Service", + "plural_name": "services", + "short_name": "svc" + }, + { + "package": "github.com/solo-io/solo-kit/api/external/kubernetes/deployment", + "type": "Deployment", + "plural_name": "deployments", + "short_name": "dp " + }, + { + "package": "github.com/solo-io/solo-kit/api/external/kubernetes/customresourcedefinition", + "type": "CustomResourceDefinition", + "plural_name": "customresourcedefinition", + "short_name": "crd " + } + ] + } + ] + } } \ No newline at end of file diff --git a/api/multicluster/v1/solo-kit.json b/api/multicluster/v1/solo-kit.json index 11b9f11d3..e1be679f4 100644 --- a/api/multicluster/v1/solo-kit.json +++ b/api/multicluster/v1/solo-kit.json @@ -1,31 +1,29 @@ { "title": "Solo-Kit Multicluster Manager", - "api_groups": [ - { - "name": "multicluster.solo.io", - "resource_group_go_package": "github.com/solo-io/solo-kit/pkg/multicluster/group", - "resource_groups": { - "kubeconfigs.multicluster.solo.io": [ - { - "name": "KubeConfig", - "package": "github.com/solo-io/solo-kit/api/multicluster/v1" - } - ] - }, - "version_configs": [ + "api_group": { + "name": "multicluster.solo.io", + "resource_group_go_package": "github.com/solo-io/solo-kit/pkg/multicluster/group", + "resource_groups": { + "kubeconfigs.multicluster.solo.io": [ { - "version": "v1", - "go_package": "github.com/solo-io/solo-kit/pkg/multicluster/v1", - "custom_resources": [ - { - "package": "github.com/solo-io/solo-kit/api/multicluster/v1", - "type": "KubeConfig", - "plural_name": "kubeconfigs", - "short_name": "kc" - } - ] + "name": "KubeConfig", + "package": "github.com/solo-io/solo-kit/api/multicluster/v1" } ] - } - ] + }, + "version_configs": [ + { + "version": "v1", + "go_package": "github.com/solo-io/solo-kit/pkg/multicluster/v1", + "custom_resources": [ + { + "package": "github.com/solo-io/solo-kit/api/multicluster/v1", + "type": "KubeConfig", + "plural_name": "kubeconfigs", + "short_name": "kc" 
+ } + ] + } + ] + } } diff --git a/pkg/code-generator/cmd/main.go b/pkg/code-generator/cmd/main.go index da059806f..94171b38c 100644 --- a/pkg/code-generator/cmd/main.go +++ b/pkg/code-generator/cmd/main.go @@ -98,11 +98,9 @@ func Generate(opts GenerateOptions) error { return false } for _, skp := range soloKitProjects { - for _, ag := range skp.ApiGroups { - for _, vc := range ag.VersionConfigs { - if strings.HasPrefix(protoFile, filepath.Dir(skp.ProjectFile)+"/"+vc.Version) { - return true - } + for _, vc := range skp.ApiGroup.VersionConfigs { + if strings.HasPrefix(protoFile, filepath.Dir(skp.ProjectFile)+"/"+vc.Version) { + return true } } } @@ -127,124 +125,118 @@ func Generate(opts GenerateOptions) error { var protoDescriptors []*descriptor.FileDescriptorProto for _, skp := range soloKitProjects { - for _, ag := range skp.ApiGroups { - importedResources, err := importCustomResources(ag.Imports) - if err != nil { - return err - } - for _, vc := range ag.VersionConfigs { - vc.CustomResources = append(vc.CustomResources, importedResources...) - for _, vc := range ag.VersionConfigs { - for _, desc := range descriptors { - if filepath.Dir(desc.ProtoFilePath) == filepath.Dir(skp.ProjectFile)+"/"+vc.Version { - vc.VersionProtos = append(vc.VersionProtos, desc.GetName()) - } - protoDescriptors = append(protoDescriptors, desc.FileDescriptorProto) - } + importedResources, err := importCustomResources(skp.ApiGroup.Imports) + if err != nil { + return err + } + for _, vc := range skp.ApiGroup.VersionConfigs { + vc.CustomResources = append(vc.CustomResources, importedResources...) 
+ for _, desc := range descriptors { + if filepath.Dir(desc.ProtoFilePath) == filepath.Dir(skp.ProjectFile)+"/"+vc.Version { + vc.VersionProtos = append(vc.VersionProtos, desc.GetName()) } + protoDescriptors = append(protoDescriptors, desc.FileDescriptorProto) } } } for _, skp := range soloKitProjects { - for _, ag := range skp.ApiGroups { - ag.SoloKitProject = skp - // Store all projects for conversion generation. - var apiGroupVersions []*model.Version - for _, vc := range ag.VersionConfigs { - vc.ApiGroup = ag - - // Build a 'Version' object that contains a resource for each message that: - // - is contained in the FileDescriptor and - // - is a solo kit resource (i.e. it has a field named 'metadata') - - version, err := parser.ProcessDescriptors(vc, ag, protoDescriptors) - if err != nil { - return err - } - apiGroupVersions = append(apiGroupVersions, version) + skp.ApiGroup.SoloKitProject = skp + // Store all projects for conversion generation. + var apiGroupVersions []*model.Version + for _, vc := range skp.ApiGroup.VersionConfigs { + vc.ApiGroup = skp.ApiGroup - code, err := codegen.GenerateProjectFiles(version, true, opts.SkipGeneratedTests) - if err != nil { - return err - } + // Build a 'Version' object that contains a resource for each message that: + // - is contained in the FileDescriptor and + // - is a solo kit resource (i.e. 
it has a field named 'metadata') + + version, err := parser.ProcessDescriptors(vc, skp.ApiGroup, protoDescriptors) + if err != nil { + return err + } + apiGroupVersions = append(apiGroupVersions, version) + + code, err := codegen.GenerateProjectFiles(version, true, opts.SkipGeneratedTests) + if err != nil { + return err + } + + outDir := filepath.Join(gopathSrc(), version.VersionConfig.GoPackage) + if err := writeCodeFiles(code, outDir); err != nil { + return err + } - outDir := filepath.Join(gopathSrc(), version.VersionConfig.GoPackage) - if err := writeCodeFiles(code, outDir); err != nil { + genDocs = &DocsOptions{} + if skp.ApiGroup.DocsDir != "" && (genDocs != nil) { + docs, err := docgen.GenerateFiles(version, genDocs) + if err != nil { return err } - genDocs = &DocsOptions{} - if ag.DocsDir != "" && (genDocs != nil) { - docs, err := docgen.GenerateFiles(version, genDocs) - if err != nil { + for _, file := range docs { + path := filepath.Join(absoluteRoot, skp.ApiGroup.DocsDir, file.Filename) + if err := os.MkdirAll(filepath.Dir(path), 0777); err != nil { return err } - - for _, file := range docs { - path := filepath.Join(absoluteRoot, ag.DocsDir, file.Filename) - if err := os.MkdirAll(filepath.Dir(path), 0777); err != nil { - return err - } - if err := ioutil.WriteFile(path, []byte(file.Content), 0644); err != nil { - return err - } - } - } - - // Generate mocks - // need to run after to make sure all resources have already been written - // Set this env var during tests so that mocks are not generated - if !opts.SkipGenMocks { - if err := genMocks(code, outDir, absoluteRoot); err != nil { + if err := ioutil.WriteFile(path, []byte(file.Content), 0644); err != nil { return err } } } - if ag.ResourceGroupGoPackage != "" { - var allResources []*model.Resource - for _, v := range apiGroupVersions { - allResources = append(allResources, v.Resources...) 
- } - ag.ResourceGroupsFoo, err = parser.GetResourceGroups(ag, allResources) - if err != nil { - return err - } - - code, err := codegen.GenerateResourceGroupFiles(ag, true, opts.SkipGeneratedTests) - if err != nil { + // Generate mocks + // need to run after to make sure all resources have already been written + // Set this env var during tests so that mocks are not generated + if !opts.SkipGenMocks { + if err := genMocks(code, outDir, absoluteRoot); err != nil { return err } + } + } - outDir := filepath.Join(gopathSrc(), ag.ResourceGroupGoPackage) - if err := writeCodeFiles(code, outDir); err != nil { - return err - } + if skp.ApiGroup.ResourceGroupGoPackage != "" { + var allResources []*model.Resource + for _, v := range apiGroupVersions { + allResources = append(allResources, v.Resources...) + } + skp.ApiGroup.ResourceGroupsFoo, err = parser.GetResourceGroups(skp.ApiGroup, allResources) + if err != nil { + return err + } - // Generate mocks - // need to run after to make sure all resources have already been written - // Set this env var during tests so that mocks are not generated - if !opts.SkipGenMocks { - if err := genMocks(code, outDir, absoluteRoot); err != nil { - return err - } - } + code, err := codegen.GenerateResourceGroupFiles(skp.ApiGroup, true, opts.SkipGeneratedTests) + if err != nil { + return err } - if ag.ConversionGoPackage != "" { - goPackageSegments := strings.Split(ag.ConversionGoPackage, "/") - ag.ConversionGoPackageShort = goPackageSegments[len(goPackageSegments)-1] + outDir := filepath.Join(gopathSrc(), skp.ApiGroup.ResourceGroupGoPackage) + if err := writeCodeFiles(code, outDir); err != nil { + return err + } - code, err := codegen.GenerateConversionFiles(ag, apiGroupVersions) - if err != nil { + // Generate mocks + // need to run after to make sure all resources have already been written + // Set this env var during tests so that mocks are not generated + if !opts.SkipGenMocks { + if err := genMocks(code, outDir, absoluteRoot); err != 
nil { return err } + } + } - outDir := filepath.Join(gopathSrc(), ag.ConversionGoPackage) - if err := writeCodeFiles(code, outDir); err != nil { - return err - } + if skp.ApiGroup.ConversionGoPackage != "" { + goPackageSegments := strings.Split(skp.ApiGroup.ConversionGoPackage, "/") + skp.ApiGroup.ConversionGoPackageShort = goPackageSegments[len(goPackageSegments)-1] + + code, err := codegen.GenerateConversionFiles(skp.ApiGroup, apiGroupVersions) + if err != nil { + return err + } + + outDir := filepath.Join(gopathSrc(), skp.ApiGroup.ConversionGoPackage) + if err := writeCodeFiles(code, outDir); err != nil { + return err } } } @@ -595,16 +587,14 @@ func importCustomResources(imports []string) ([]model.CustomResourceConfig, erro if err != nil { return nil, err } - for _, ag := range soloKitProject.ApiGroups { - for _, vc := range ag.VersionConfigs { - var customResources []model.CustomResourceConfig - for _, cr := range vc.CustomResources { - cr.Package = ag.ResourceGroupGoPackage - cr.Imported = true - customResources = append(customResources, cr) - } - results = append(results, customResources...) + for _, vc := range soloKitProject.ApiGroup.VersionConfigs { + var customResources []model.CustomResourceConfig + for _, cr := range vc.CustomResources { + cr.Package = soloKitProject.ApiGroup.ResourceGroupGoPackage + cr.Imported = true + customResources = append(customResources, cr) } + results = append(results, customResources...) 
} } diff --git a/pkg/code-generator/model/project.go b/pkg/code-generator/model/project.go index bd96d39c2..9f22a59d4 100644 --- a/pkg/code-generator/model/project.go +++ b/pkg/code-generator/model/project.go @@ -19,9 +19,9 @@ const ProjectConfigFilename = "solo-kit.json" // SOLO-KIT Descriptors from which code can be generated type SoloKitProject struct { - Title string `json:"title"` - Description string `json:"description"` - ApiGroups []*ApiGroup `json:"api_groups"` + Title string `json:"title"` + Description string `json:"description"` + ApiGroup *ApiGroup `json:"api_group"` // set by load ProjectFile string @@ -189,17 +189,15 @@ func LoadProjectConfig(path string) (SoloKitProject, error) { } skp.ProjectFile = path - for _, ag := range skp.ApiGroups { - goPackageSegments := strings.Split(ag.ResourceGroupGoPackage, "/") - ag.ResourceGroupGoPackageShort = goPackageSegments[len(goPackageSegments)-1] - for _, vc := range ag.VersionConfigs { - if vc.GoPackage == "" { - goPkg, err := detectGoPackageForVersion(filepath.Dir(skp.ProjectFile) + "/" + vc.Version) - if err != nil { - return SoloKitProject{}, err - } - vc.GoPackage = goPkg + goPackageSegments := strings.Split(skp.ApiGroup.ResourceGroupGoPackage, "/") + skp.ApiGroup.ResourceGroupGoPackageShort = goPackageSegments[len(goPackageSegments)-1] + for _, vc := range skp.ApiGroup.VersionConfigs { + if vc.GoPackage == "" { + goPkg, err := detectGoPackageForVersion(filepath.Dir(skp.ProjectFile) + "/" + vc.Version) + if err != nil { + return SoloKitProject{}, err } + vc.GoPackage = goPkg } } diff --git a/test/mocks/api/solo-kit.json b/test/mocks/api/solo-kit.json index c8f4b8ed9..941f81c61 100644 --- a/test/mocks/api/solo-kit.json +++ b/test/mocks/api/solo-kit.json @@ -1,63 +1,61 @@ { "title": "Solo-Kit Testing", "description": "mock solo-kit project", - "api_groups": [ - { - "name": "testing.solo.io", - "docs_dir": "test/mocks/docs", - "conversion_go_package": "github.com/solo-io/solo-kit/test/mocks/conversion", - 
"resource_group_go_package": "github.com/solo-io/solo-kit/test/mocks/group", - "imports": [ - "github.com/solo-io/solo-kit/api/external/kubernetes" - ], - "resource_groups": { - "testing.solo.io": [ - { - "name": "MockResource", - "package": "testing.solo.io" - }, - { - "name": "FakeResource", - "package": "testing.solo.io" - }, - { - "name": "AnotherMockResource", - "package": "testing.solo.io" - }, - { - "name": "ClusterResource", - "package": "testing.solo.io" - }, - { - "name": "MockCustomType", - "package": "github.com/solo-io/solo-kit/test/mocks/api/v1/customtype" - }, - { - "name": "Pod", - "package": "github.com/solo-io/solo-kit/pkg/api/v1/resources/common/kubernetes" - } - ] - }, - "version_configs": [ + "api_group": { + "name": "testing.solo.io", + "docs_dir": "test/mocks/docs", + "conversion_go_package": "github.com/solo-io/solo-kit/test/mocks/conversion", + "resource_group_go_package": "github.com/solo-io/solo-kit/test/mocks/group", + "imports": [ + "github.com/solo-io/solo-kit/api/external/kubernetes" + ], + "resource_groups": { + "testing.solo.io": [ + { + "name": "MockResource", + "package": "testing.solo.io" + }, + { + "name": "FakeResource", + "package": "testing.solo.io" + }, + { + "name": "AnotherMockResource", + "package": "testing.solo.io" + }, { - "version": "v2alpha1" + "name": "ClusterResource", + "package": "testing.solo.io" }, { - "version": "v1alpha1", - "crd_group_override": "crds.testing.solo.io" + "name": "MockCustomType", + "package": "github.com/solo-io/solo-kit/test/mocks/api/v1/customtype" }, { - "version": "v1", - "custom_resources": [ - { - "package": "github.com/solo-io/solo-kit/test/mocks/api/v1/customtype", - "type": "MockCustomType", - "plural_name": "mcts", - "short_name": "mct" - } - ] + "name": "Pod", + "package": "github.com/solo-io/solo-kit/pkg/api/v1/resources/common/kubernetes" } ] - } - ] + }, + "version_configs": [ + { + "version": "v2alpha1" + }, + { + "version": "v1alpha1", + "crd_group_override": 
"crds.testing.solo.io" + }, + { + "version": "v1", + "custom_resources": [ + { + "package": "github.com/solo-io/solo-kit/test/mocks/api/v1/customtype", + "type": "MockCustomType", + "plural_name": "mcts", + "short_name": "mct" + } + ] + } + ] + } } \ No newline at end of file diff --git a/test/mocks/docs/github.com/solo-io/solo-kit/api/external/google/protobuf/descriptor.proto.sk.md b/test/mocks/docs/github.com/solo-io/solo-kit/api/external/google/protobuf/descriptor.proto.sk.md new file mode 100644 index 000000000..a4f7351ac --- /dev/null +++ b/test/mocks/docs/github.com/solo-io/solo-kit/api/external/google/protobuf/descriptor.proto.sk.md @@ -0,0 +1,825 @@ + + +### Package: `google.protobuf` +Protocol Buffers - Google's data interchange format +Copyright 2008 Google Inc. All rights reserved. +https://developers.google.com/protocol-buffers/ + +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are +met: + + * Redistributions of source code must retain the above copyright +notice, this list of conditions and the following disclaimer. + * Redistributions in binary form must reproduce the above +copyright notice, this list of conditions and the following disclaimer +in the documentation and/or other materials provided with the +distribution. + * Neither the name of Google Inc. nor the names of its +contributors may be used to endorse or promote products derived from +this software without specific prior written permission. + +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT +OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. +Author: kenton@google.com (Kenton Varda) + Based on original Protocol Buffers design by + Sanjay Ghemawat, Jeff Dean, and others. + +The messages in this file describe the definitions found in .proto files. +A valid .proto file can be translated directly to a FileDescriptorProto +without any other information (e.g. without reading its imports). + + + +#### Types: + + +- [FileDescriptorSet](#FileDescriptorSet) +- [FileDescriptorProto](#FileDescriptorProto) +- [DescriptorProto](#DescriptorProto) +- [ExtensionRange](#ExtensionRange) +- [ReservedRange](#ReservedRange) +- [FieldDescriptorProto](#FieldDescriptorProto) +- [Type](#Type) +- [Label](#Label) +- [OneofDescriptorProto](#OneofDescriptorProto) +- [EnumDescriptorProto](#EnumDescriptorProto) +- [EnumValueDescriptorProto](#EnumValueDescriptorProto) +- [ServiceDescriptorProto](#ServiceDescriptorProto) +- [MethodDescriptorProto](#MethodDescriptorProto) +- [FileOptions](#FileOptions) +- [OptimizeMode](#OptimizeMode) +- [MessageOptions](#MessageOptions) +- [FieldOptions](#FieldOptions) +- [CType](#CType) +- [JSType](#JSType) +- [OneofOptions](#OneofOptions) +- [EnumOptions](#EnumOptions) +- [EnumValueOptions](#EnumValueOptions) +- [ServiceOptions](#ServiceOptions) +- [MethodOptions](#MethodOptions) +- [IdempotencyLevel](#IdempotencyLevel) +- [UninterpretedOption](#UninterpretedOption) +- [NamePart](#NamePart) +- [SourceCodeInfo](#SourceCodeInfo) +- [Location](#Location) +- 
[GeneratedCodeInfo](#GeneratedCodeInfo) +- [Annotation](#Annotation) + + + + +##### Source File: [github.com/solo-io/solo-kit/api/external/google/protobuf/descriptor.proto](https://github.com/solo-io/solo-kit/blob/master/api/external/google/protobuf/descriptor.proto) + + + + + +--- +### FileDescriptorSet + + +The protocol compiler can output a FileDescriptorSet containing the .proto +files it parses. + +```yaml +"file": []google.protobuf.FileDescriptorProto + +``` + +| Field | Type | Description | Default | +| ----- | ---- | ----------- |----------- | +| `file` | [[]google.protobuf.FileDescriptorProto](descriptor.proto.sk.md#FileDescriptorProto) | | | + + + + +--- +### FileDescriptorProto + + +Describes a complete .proto file. + +```yaml +"name": string +"package": string +"dependency": []string +"publicDependency": []int +"weakDependency": []int +"messageType": []google.protobuf.DescriptorProto +"enumType": []google.protobuf.EnumDescriptorProto +"service": []google.protobuf.ServiceDescriptorProto +"extension": []google.protobuf.FieldDescriptorProto +"options": .google.protobuf.FileOptions +"sourceCodeInfo": .google.protobuf.SourceCodeInfo +"syntax": string + +``` + +| Field | Type | Description | Default | +| ----- | ---- | ----------- |----------- | +| `name` | `string` | | | +| `package` | `string` | | | +| `dependency` | `[]string` | Names of files imported by this file. | | +| `publicDependency` | `[]int` | Indexes of the public imported files in the dependency list above. | | +| `weakDependency` | `[]int` | Indexes of the weak imported files in the dependency list. For Google-internal migration only. Do not use. | | +| `messageType` | [[]google.protobuf.DescriptorProto](descriptor.proto.sk.md#DescriptorProto) | All top-level definitions in this file. 
| | +| `enumType` | [[]google.protobuf.EnumDescriptorProto](descriptor.proto.sk.md#EnumDescriptorProto) | | | +| `service` | [[]google.protobuf.ServiceDescriptorProto](descriptor.proto.sk.md#ServiceDescriptorProto) | | | +| `extension` | [[]google.protobuf.FieldDescriptorProto](descriptor.proto.sk.md#FieldDescriptorProto) | | | +| `options` | [.google.protobuf.FileOptions](https://developers.google.com/protocol-buffers/docs/reference/csharp/class/google/protobuf/well-known-types/file-options) | | | +| `sourceCodeInfo` | [.google.protobuf.SourceCodeInfo](https://developers.google.com/protocol-buffers/docs/reference/csharp/class/google/protobuf/well-known-types/source-code-info) | This field contains optional information about the original source code. You may safely remove this entire field without harming runtime functionality of the descriptors -- the information is needed only by development tools. | | +| `syntax` | `string` | The syntax of the proto file. The supported values are "proto2" and "proto3". | | + + + + +--- +### DescriptorProto + + +Describes a message type. 
+ +```yaml +"name": string +"field": []google.protobuf.FieldDescriptorProto +"extension": []google.protobuf.FieldDescriptorProto +"nestedType": []google.protobuf.DescriptorProto +"enumType": []google.protobuf.EnumDescriptorProto +"extensionRange": []google.protobuf.DescriptorProto.ExtensionRange +"oneofDecl": []google.protobuf.OneofDescriptorProto +"options": .google.protobuf.MessageOptions +"reservedRange": []google.protobuf.DescriptorProto.ReservedRange +"reservedName": []string + +``` + +| Field | Type | Description | Default | +| ----- | ---- | ----------- |----------- | +| `name` | `string` | | | +| `field` | [[]google.protobuf.FieldDescriptorProto](descriptor.proto.sk.md#FieldDescriptorProto) | | | +| `extension` | [[]google.protobuf.FieldDescriptorProto](descriptor.proto.sk.md#FieldDescriptorProto) | | | +| `nestedType` | [[]google.protobuf.DescriptorProto](descriptor.proto.sk.md#DescriptorProto) | | | +| `enumType` | [[]google.protobuf.EnumDescriptorProto](descriptor.proto.sk.md#EnumDescriptorProto) | | | +| `extensionRange` | [[]google.protobuf.DescriptorProto.ExtensionRange](descriptor.proto.sk.md#ExtensionRange) | | | +| `oneofDecl` | [[]google.protobuf.OneofDescriptorProto](descriptor.proto.sk.md#OneofDescriptorProto) | | | +| `options` | [.google.protobuf.MessageOptions](https://developers.google.com/protocol-buffers/docs/reference/csharp/class/google/protobuf/well-known-types/message-options) | | | +| `reservedRange` | [[]google.protobuf.DescriptorProto.ReservedRange](descriptor.proto.sk.md#ReservedRange) | | | +| `reservedName` | `[]string` | Reserved field names, which may not be used by fields in the same message. A given name may only be reserved once. | | + + + + +--- +### ExtensionRange + + + +```yaml +"start": int +"end": int + +``` + +| Field | Type | Description | Default | +| ----- | ---- | ----------- |----------- | +| `start` | `int` | | | +| `end` | `int` | | | + + + + +--- +### ReservedRange + + +Range of reserved tag numbers. 
Reserved tag numbers may not be used by +fields or extension ranges in the same message. Reserved ranges may +not overlap. + +```yaml +"start": int +"end": int + +``` + +| Field | Type | Description | Default | +| ----- | ---- | ----------- |----------- | +| `start` | `int` | | | +| `end` | `int` | | | + + + + +--- +### FieldDescriptorProto + + +Describes a field within a message. + +```yaml +"name": string +"number": int +"label": .google.protobuf.FieldDescriptorProto.Label +"type": .google.protobuf.FieldDescriptorProto.Type +"typeName": string +"extendee": string +"defaultValue": string +"oneofIndex": int +"jsonName": string +"options": .google.protobuf.FieldOptions + +``` + +| Field | Type | Description | Default | +| ----- | ---- | ----------- |----------- | +| `name` | `string` | | | +| `number` | `int` | | | +| `label` | [.google.protobuf.FieldDescriptorProto.Label](https://developers.google.com/protocol-buffers/docs/reference/csharp/class/google/protobuf/well-known-types/field-descriptor-proto.-label) | | | +| `type` | [.google.protobuf.FieldDescriptorProto.Type](https://developers.google.com/protocol-buffers/docs/reference/csharp/class/google/protobuf/well-known-types/field-descriptor-proto.-type) | If type_name is set, this need not be set. If both this and type_name are set, this must be one of TYPE_ENUM, TYPE_MESSAGE or TYPE_GROUP. | | +| `typeName` | `string` | For message and enum types, this is the name of the type. If the name starts with a '.', it is fully-qualified. Otherwise, C++-like scoping rules are used to find the type (i.e. first the nested types within this message are searched, then within the parent, on up to the root namespace). | | +| `extendee` | `string` | For extensions, this is the name of the type being extended. It is resolved in the same manner as type_name. | | +| `defaultValue` | `string` | For numeric types, contains the original text representation of the value. For booleans, "true" or "false". 
For strings, contains the default text contents (not escaped in any way). For bytes, contains the C escaped value. All bytes >= 128 are escaped. TODO(kenton): Base-64 encode? | | +| `oneofIndex` | `int` | If set, gives the index of a oneof in the containing type's oneof_decl list. This field is a member of that oneof. | | +| `jsonName` | `string` | JSON name of this field. The value is set by protocol compiler. If the user has set a "json_name" option on this field, that option's value will be used. Otherwise, it's deduced from the field's name by converting it to camelCase. | | +| `options` | [.google.protobuf.FieldOptions](https://developers.google.com/protocol-buffers/docs/reference/csharp/class/google/protobuf/well-known-types/field-options) | | | + + + + +--- +### Type + + + +| Name | Description | +| ----- | ----------- | +| `TYPE_DOUBLE` | 0 is reserved for errors. Order is weird for historical reasons. | +| `TYPE_FLOAT` | | +| `TYPE_INT64` | Not ZigZag encoded. Negative numbers take 10 bytes. Use TYPE_SINT64 if negative values are likely. | +| `TYPE_UINT64` | | +| `TYPE_INT32` | Not ZigZag encoded. Negative numbers take 10 bytes. Use TYPE_SINT32 if negative values are likely. | +| `TYPE_FIXED64` | | +| `TYPE_FIXED32` | | +| `TYPE_BOOL` | | +| `TYPE_STRING` | | +| `TYPE_GROUP` | Tag-delimited aggregate. Group type is deprecated and not supported in proto3. However, Proto3 implementations should still be able to parse the group wire format and treat group fields as unknown fields. | +| `TYPE_MESSAGE` | | +| `TYPE_BYTES` | New in version 2. | +| `TYPE_UINT32` | | +| `TYPE_ENUM` | | +| `TYPE_SFIXED32` | | +| `TYPE_SFIXED64` | | +| `TYPE_SINT32` | | +| `TYPE_SINT64` | | + + + + +--- +### Label + + + +| Name | Description | +| ----- | ----------- | +| `LABEL_OPTIONAL` | 0 is reserved for errors | +| `LABEL_REQUIRED` | | +| `LABEL_REPEATED` | | + + + + +--- +### OneofDescriptorProto + + +Describes a oneof. 
+ +```yaml +"name": string +"options": .google.protobuf.OneofOptions + +``` + +| Field | Type | Description | Default | +| ----- | ---- | ----------- |----------- | +| `name` | `string` | | | +| `options` | [.google.protobuf.OneofOptions](https://developers.google.com/protocol-buffers/docs/reference/csharp/class/google/protobuf/well-known-types/oneof-options) | | | + + + + +--- +### EnumDescriptorProto + + +Describes an enum type. + +```yaml +"name": string +"value": []google.protobuf.EnumValueDescriptorProto +"options": .google.protobuf.EnumOptions + +``` + +| Field | Type | Description | Default | +| ----- | ---- | ----------- |----------- | +| `name` | `string` | | | +| `value` | [[]google.protobuf.EnumValueDescriptorProto](descriptor.proto.sk.md#EnumValueDescriptorProto) | | | +| `options` | [.google.protobuf.EnumOptions](https://developers.google.com/protocol-buffers/docs/reference/csharp/class/google/protobuf/well-known-types/enum-options) | | | + + + + +--- +### EnumValueDescriptorProto + + +Describes a value within an enum. + +```yaml +"name": string +"number": int +"options": .google.protobuf.EnumValueOptions + +``` + +| Field | Type | Description | Default | +| ----- | ---- | ----------- |----------- | +| `name` | `string` | | | +| `number` | `int` | | | +| `options` | [.google.protobuf.EnumValueOptions](https://developers.google.com/protocol-buffers/docs/reference/csharp/class/google/protobuf/well-known-types/enum-value-options) | | | + + + + +--- +### ServiceDescriptorProto + + +Describes a service. 
+ +```yaml +"name": string +"method": []google.protobuf.MethodDescriptorProto +"options": .google.protobuf.ServiceOptions + +``` + +| Field | Type | Description | Default | +| ----- | ---- | ----------- |----------- | +| `name` | `string` | | | +| `method` | [[]google.protobuf.MethodDescriptorProto](descriptor.proto.sk.md#MethodDescriptorProto) | | | +| `options` | [.google.protobuf.ServiceOptions](https://developers.google.com/protocol-buffers/docs/reference/csharp/class/google/protobuf/well-known-types/service-options) | | | + + + + +--- +### MethodDescriptorProto + + +Describes a method of a service. + +```yaml +"name": string +"inputType": string +"outputType": string +"options": .google.protobuf.MethodOptions +"clientStreaming": bool +"serverStreaming": bool + +``` + +| Field | Type | Description | Default | +| ----- | ---- | ----------- |----------- | +| `name` | `string` | | | +| `inputType` | `string` | Input and output type names. These are resolved in the same way as FieldDescriptorProto.type_name, but must refer to a message type. 
| | +| `outputType` | `string` | | | +| `options` | [.google.protobuf.MethodOptions](https://developers.google.com/protocol-buffers/docs/reference/csharp/class/google/protobuf/well-known-types/method-options) | | | +| `clientStreaming` | `bool` | Identifies if client streams multiple client messages | Default: false | +| `serverStreaming` | `bool` | Identifies if server streams multiple server messages | Default: false | + + + + +--- +### FileOptions + + + +```yaml +"javaPackage": string +"javaOuterClassname": string +"javaMultipleFiles": bool +"javaGenerateEqualsAndHash": bool +"javaStringCheckUtf8": bool +"optimizeFor": .google.protobuf.FileOptions.OptimizeMode +"goPackage": string +"ccGenericServices": bool +"javaGenericServices": bool +"pyGenericServices": bool +"deprecated": bool +"ccEnableArenas": bool +"objcClassPrefix": string +"csharpNamespace": string +"swiftPrefix": string +"phpClassPrefix": string +"uninterpretedOption": []google.protobuf.UninterpretedOption + +``` + +| Field | Type | Description | Default | +| ----- | ---- | ----------- |----------- | +| `javaPackage` | `string` | Sets the Java package where classes generated from this .proto will be placed. By default, the proto package is used, but this is often inappropriate because proto packages do not normally start with backwards domain names. | | +| `javaOuterClassname` | `string` | If set, all the classes from the .proto file are wrapped in a single outer class with the given name. This applies to both Proto1 (equivalent to the old "--one_java_file" option) and Proto2 (where a .proto always translates to a single class, but you may want to explicitly choose the class name). | | +| `javaMultipleFiles` | `bool` | If set true, then the Java code generator will generate a separate .java file for each top-level message, enum, and service defined in the .proto file. Thus, these types will *not* be nested inside the outer class named by java_outer_classname. 
However, the outer class will still be generated to contain the file's getDescriptor() method as well as any top-level extensions defined in the file. | Default: false | +| `javaGenerateEqualsAndHash` | `bool` | This option does nothing. | | +| `javaStringCheckUtf8` | `bool` | If set true, then the Java2 code generator will generate code that throws an exception whenever an attempt is made to assign a non-UTF-8 byte sequence to a string field. Message reflection will do the same. However, an extension field still accepts non-UTF-8 byte sequences. This option has no effect on when used with the lite runtime. | Default: false | +| `optimizeFor` | [.google.protobuf.FileOptions.OptimizeMode](https://developers.google.com/protocol-buffers/docs/reference/csharp/class/google/protobuf/well-known-types/file-options.-optimize-mode) | | Default: SPEED | +| `goPackage` | `string` | Sets the Go package where structs generated from this .proto will be placed. If omitted, the Go package will be derived from the following: - The basename of the package import path, if provided. - Otherwise, the package statement in the .proto file, if present. - Otherwise, the basename of the .proto file, without extension. | | +| `ccGenericServices` | `bool` | Should generic services be generated in each language? "Generic" services are not specific to any particular RPC system. They are generated by the main code generators in each language (without additional plugins). Generic services were the only kind of service generation supported by early versions of google.protobuf. Generic services are now considered deprecated in favor of using plugins that generate code specific to your particular RPC system. Therefore, these default to false. Old code which depends on generic services should explicitly set them to true. | Default: false | +| `javaGenericServices` | `bool` | | Default: false | +| `pyGenericServices` | `bool` | | Default: false | +| `deprecated` | `bool` | Is this file deprecated? 
Depending on the target platform, this can emit Deprecated annotations for everything in the file, or it will be completely ignored; in the very least, this is a formalization for deprecating files. | Default: false | +| `ccEnableArenas` | `bool` | Enables the use of arenas for the proto messages in this file. This applies only to generated classes for C++. | Default: false | +| `objcClassPrefix` | `string` | Sets the objective c class prefix which is prepended to all objective c generated classes from this .proto. There is no default. | | +| `csharpNamespace` | `string` | Namespace for generated classes; defaults to the package. | | +| `swiftPrefix` | `string` | By default Swift generators will take the proto package and CamelCase it replacing '.' with underscore and use that to prefix the types/symbols defined. When this options is provided, they will use this value instead to prefix the types/symbols defined. | | +| `phpClassPrefix` | `string` | Sets the php class prefix which is prepended to all php generated classes from this .proto. Default is empty. | | +| `uninterpretedOption` | [[]google.protobuf.UninterpretedOption](descriptor.proto.sk.md#UninterpretedOption) | The parser stores options it doesn't recognize here. See above. | | + + + + +--- +### OptimizeMode + + +Generated classes can be optimized for speed or code size. + +| Name | Description | +| ----- | ----------- | +| `SPEED` | | +| `CODE_SIZE` | etc. | +| `LITE_RUNTIME` | | + + + + +--- +### MessageOptions + + + +```yaml +"messageSetWireFormat": bool +"noStandardDescriptorAccessor": bool +"deprecated": bool +"mapEntry": bool +"uninterpretedOption": []google.protobuf.UninterpretedOption + +``` + +| Field | Type | Description | Default | +| ----- | ---- | ----------- |----------- | +| `messageSetWireFormat` | `bool` | Set true to use the old proto1 MessageSet wire format for extensions. This is provided for backwards-compatibility with the MessageSet wire format. 
You should not use this for any other reason: It's less efficient, has fewer features, and is more complicated. The message must be defined exactly as follows: message Foo { option message_set_wire_format = true; extensions 4 to max; } Note that the message cannot have any defined fields; MessageSets only have extensions. All extensions of your type must be singular messages; e.g. they cannot be int32s, enums, or repeated messages. Because this is an option, the above two restrictions are not enforced by the protocol compiler. | Default: false | +| `noStandardDescriptorAccessor` | `bool` | Disables the generation of the standard "descriptor()" accessor, which can conflict with a field of the same name. This is meant to make migration from proto1 easier; new code should avoid fields named "descriptor". | Default: false | +| `deprecated` | `bool` | Is this message deprecated? Depending on the target platform, this can emit Deprecated annotations for the message, or it will be completely ignored; in the very least, this is a formalization for deprecating messages. | Default: false | +| `mapEntry` | `bool` | Whether the message is an automatically generated map entry type for the maps field. For maps fields: map map_field = 1; The parsed descriptor looks like: message MapFieldEntry { option map_entry = true; optional KeyType key = 1; optional ValueType value = 2; } repeated MapFieldEntry map_field = 1; Implementations may choose not to generate the map_entry=true message, but use a native map in the target language to hold the keys and values. The reflection APIs in such implementations still need to work as if the field is a repeated message field. NOTE: Do not set the option in .proto files. Always use the maps syntax instead. The option should only be implicitly set by the proto compiler parser. | | +| `uninterpretedOption` | [[]google.protobuf.UninterpretedOption](descriptor.proto.sk.md#UninterpretedOption) | The parser stores options it doesn't recognize here. 
See above. | | + + + + +--- +### FieldOptions + + + +```yaml +"ctype": .google.protobuf.FieldOptions.CType +"packed": bool +"jstype": .google.protobuf.FieldOptions.JSType +"lazy": bool +"deprecated": bool +"weak": bool +"uninterpretedOption": []google.protobuf.UninterpretedOption + +``` + +| Field | Type | Description | Default | +| ----- | ---- | ----------- |----------- | +| `ctype` | [.google.protobuf.FieldOptions.CType](https://developers.google.com/protocol-buffers/docs/reference/csharp/class/google/protobuf/well-known-types/field-options.c-type) | The ctype option instructs the C++ code generator to use a different representation of the field than it normally would. See the specific options below. This option is not yet implemented in the open source release -- sorry, we'll try to include it in a future version! | Default: STRING | +| `packed` | `bool` | The packed option can be enabled for repeated primitive fields to enable a more efficient representation on the wire. Rather than repeatedly writing the tag and type for each element, the entire array is encoded as a single length-delimited blob. In proto3, only explicit setting it to false will avoid using packed encoding. | | +| `jstype` | [.google.protobuf.FieldOptions.JSType](https://developers.google.com/protocol-buffers/docs/reference/csharp/class/google/protobuf/well-known-types/field-options.js-type) | The jstype option determines the JavaScript type used for values of the field. The option is permitted only for 64 bit integral and fixed types (int64, uint64, sint64, fixed64, sfixed64). By default these types are represented as JavaScript strings. This avoids loss of precision that can happen when a large value is converted to a floating point JavaScript numbers. Specifying JS_NUMBER for the jstype causes the generated JavaScript code to use the JavaScript "number" type instead of strings. This option is an enum to permit additional types to be added, e.g. goog.math.Integer. 
| Default: JS_NORMAL | +| `lazy` | `bool` | Should this field be parsed lazily? Lazy applies only to message-type fields. It means that when the outer message is initially parsed, the inner message's contents will not be parsed but instead stored in encoded form. The inner message will actually be parsed when it is first accessed. This is only a hint. Implementations are free to choose whether to use eager or lazy parsing regardless of the value of this option. However, setting this option true suggests that the protocol author believes that using lazy parsing on this field is worth the additional bookkeeping overhead typically needed to implement it. This option does not affect the public interface of any generated code; all method signatures remain the same. Furthermore, thread-safety of the interface is not affected by this option; const methods remain safe to call from multiple threads concurrently, while non-const methods continue to require exclusive access. Note that implementations may choose not to check required fields within a lazy sub-message. That is, calling IsInitialized() on the outer message may return true even if the inner message has missing required fields. This is necessary because otherwise the inner message would have to be parsed in order to perform the check, defeating the purpose of lazy parsing. An implementation which chooses not to check required fields must be consistent about it. That is, for any particular sub-message, the implementation must either *always* check its required fields, or *never* check its required fields, regardless of whether or not the message has been parsed. | Default: false | +| `deprecated` | `bool` | Is this field deprecated? Depending on the target platform, this can emit Deprecated annotations for accessors, or it will be completely ignored; in the very least, this is a formalization for deprecating fields. | Default: false | +| `weak` | `bool` | For Google-internal migration only. Do not use. 
| Default: false | +| `uninterpretedOption` | [[]google.protobuf.UninterpretedOption](descriptor.proto.sk.md#UninterpretedOption) | The parser stores options it doesn't recognize here. See above. | | + + + + +--- +### CType + + + +| Name | Description | +| ----- | ----------- | +| `STRING` | Default mode. | +| `CORD` | | +| `STRING_PIECE` | | + + + + +--- +### JSType + + + +| Name | Description | +| ----- | ----------- | +| `JS_NORMAL` | Use the default type. | +| `JS_STRING` | Use JavaScript strings. | +| `JS_NUMBER` | Use JavaScript numbers. | + + + + +--- +### OneofOptions + + + +```yaml +"uninterpretedOption": []google.protobuf.UninterpretedOption + +``` + +| Field | Type | Description | Default | +| ----- | ---- | ----------- |----------- | +| `uninterpretedOption` | [[]google.protobuf.UninterpretedOption](descriptor.proto.sk.md#UninterpretedOption) | The parser stores options it doesn't recognize here. See above. | | + + + + +--- +### EnumOptions + + + +```yaml +"allowAlias": bool +"deprecated": bool +"uninterpretedOption": []google.protobuf.UninterpretedOption + +``` + +| Field | Type | Description | Default | +| ----- | ---- | ----------- |----------- | +| `allowAlias` | `bool` | Set this option to true to allow mapping different tag names to the same value. | | +| `deprecated` | `bool` | Is this enum deprecated? Depending on the target platform, this can emit Deprecated annotations for the enum, or it will be completely ignored; in the very least, this is a formalization for deprecating enums. | Default: false | +| `uninterpretedOption` | [[]google.protobuf.UninterpretedOption](descriptor.proto.sk.md#UninterpretedOption) | The parser stores options it doesn't recognize here. See above. 
| | + + + + +--- +### EnumValueOptions + + + +```yaml +"deprecated": bool +"uninterpretedOption": []google.protobuf.UninterpretedOption + +``` + +| Field | Type | Description | Default | +| ----- | ---- | ----------- |----------- | +| `deprecated` | `bool` | Is this enum value deprecated? Depending on the target platform, this can emit Deprecated annotations for the enum value, or it will be completely ignored; in the very least, this is a formalization for deprecating enum values. | Default: false | +| `uninterpretedOption` | [[]google.protobuf.UninterpretedOption](descriptor.proto.sk.md#UninterpretedOption) | The parser stores options it doesn't recognize here. See above. | | + + + + +--- +### ServiceOptions + + + +```yaml +"deprecated": bool +"uninterpretedOption": []google.protobuf.UninterpretedOption + +``` + +| Field | Type | Description | Default | +| ----- | ---- | ----------- |----------- | +| `deprecated` | `bool` | Is this service deprecated? Depending on the target platform, this can emit Deprecated annotations for the service, or it will be completely ignored; in the very least, this is a formalization for deprecating services. | Default: false | +| `uninterpretedOption` | [[]google.protobuf.UninterpretedOption](descriptor.proto.sk.md#UninterpretedOption) | The parser stores options it doesn't recognize here. See above. | | + + + + +--- +### MethodOptions + + + +```yaml +"deprecated": bool +"idempotencyLevel": .google.protobuf.MethodOptions.IdempotencyLevel +"uninterpretedOption": []google.protobuf.UninterpretedOption + +``` + +| Field | Type | Description | Default | +| ----- | ---- | ----------- |----------- | +| `deprecated` | `bool` | Is this method deprecated? Depending on the target platform, this can emit Deprecated annotations for the method, or it will be completely ignored; in the very least, this is a formalization for deprecating methods. 
| Default: false | +| `idempotencyLevel` | [.google.protobuf.MethodOptions.IdempotencyLevel](https://developers.google.com/protocol-buffers/docs/reference/csharp/class/google/protobuf/well-known-types/method-options.-idempotency-level) | | Default: IDEMPOTENCY_UNKNOWN | +| `uninterpretedOption` | [[]google.protobuf.UninterpretedOption](descriptor.proto.sk.md#UninterpretedOption) | The parser stores options it doesn't recognize here. See above. | | + + + + +--- +### IdempotencyLevel + + +Is this method side-effect-free (or safe in HTTP parlance), or idempotent, +or neither? HTTP based RPC implementation may choose GET verb for safe +methods, and PUT verb for idempotent methods instead of the default POST. + +| Name | Description | +| ----- | ----------- | +| `IDEMPOTENCY_UNKNOWN` | | +| `NO_SIDE_EFFECTS` | | +| `IDEMPOTENT` | | + + + + +--- +### UninterpretedOption + + +A message representing an option the parser does not recognize. This only +appears in options protos created by the compiler::Parser class. +DescriptorPool resolves these when building Descriptor objects. Therefore, +options protos in descriptor objects (e.g. returned by Descriptor::options(), +or produced by Descriptor::CopyTo()) will never have UninterpretedOptions +in them. + +```yaml +"name": []google.protobuf.UninterpretedOption.NamePart +"identifierValue": string +"positiveIntValue": int +"negativeIntValue": int +"doubleValue": float +"stringValue": bytes +"aggregateValue": string + +``` + +| Field | Type | Description | Default | +| ----- | ---- | ----------- |----------- | +| `name` | [[]google.protobuf.UninterpretedOption.NamePart](descriptor.proto.sk.md#NamePart) | | | +| `identifierValue` | `string` | The value of the uninterpreted option, in whatever type the tokenizer identified it as during parsing. Exactly one of these should be set. 
| | +| `positiveIntValue` | `int` | | | +| `negativeIntValue` | `int` | | | +| `doubleValue` | `float` | | | +| `stringValue` | `bytes` | | | +| `aggregateValue` | `string` | | | + + + + +--- +### NamePart + + +The name of the uninterpreted option. Each string represents a segment in +a dot-separated name. is_extension is true iff a segment represents an +extension (denoted with parentheses in options specs in .proto files). +E.g.,{ ["foo", false], ["bar.baz", true], ["qux", false] } represents +"foo.(bar.baz).qux". + +```yaml +"namePart": string +"isExtension": bool + +``` + +| Field | Type | Description | Default | +| ----- | ---- | ----------- |----------- | +| `namePart` | `string` | | | +| `isExtension` | `bool` | | | + + + + +--- +### SourceCodeInfo + + +Encapsulates information about the original source file from which a +FileDescriptorProto was generated. + +```yaml +"location": []google.protobuf.SourceCodeInfo.Location + +``` + +| Field | Type | Description | Default | +| ----- | ---- | ----------- |----------- | +| `location` | [[]google.protobuf.SourceCodeInfo.Location](descriptor.proto.sk.md#Location) | A Location identifies a piece of source code in a .proto file which corresponds to a particular definition. This information is intended to be useful to IDEs, code indexers, documentation generators, and similar tools. For example, say we have a file like: message Foo { optional string foo = 1; } Let's look at just the field definition: optional string foo = 1; ^ ^^ ^^ ^ ^^^ a bc de f ghi We have the following locations: span path represents [a,i) [ 4, 0, 2, 0 ] The whole field definition. [a,b) [ 4, 0, 2, 0, 4 ] The label (optional). [c,d) [ 4, 0, 2, 0, 5 ] The type (string). [e,f) [ 4, 0, 2, 0, 1 ] The name (foo). [g,h) [ 4, 0, 2, 0, 3 ] The number (1). Notes: - A location may refer to a repeated field itself (i.e. not to any particular index within it). This is used whenever a set of elements are logically enclosed in a single code segment. 
For example, an entire extend block (possibly containing multiple extension definitions) will have an outer location whose path refers to the "extensions" repeated field without an index. - Multiple locations may have the same path. This happens when a single logical declaration is spread out across multiple places. The most obvious example is the "extend" block again -- there may be multiple extend blocks in the same scope, each of which will have the same path. - A location's span is not always a subset of its parent's span. For example, the "extendee" of an extension declaration appears at the beginning of the "extend" block and is shared by all extensions within the block. - Just because a location's span is a subset of some other location's span does not mean that it is a descendent. For example, a "group" defines both a type and a field in a single declaration. Thus, the locations corresponding to the type and field and their components will overlap. - Code which tries to interpret locations should probably be designed to ignore those that it doesn't understand, as more types of locations could be recorded in the future. | | + + + + +--- +### Location + + + +```yaml +"path": []int +"span": []int +"leadingComments": string +"trailingComments": string +"leadingDetachedComments": []string + +``` + +| Field | Type | Description | Default | +| ----- | ---- | ----------- |----------- | +| `path` | `[]int` | Identifies which part of the FileDescriptorProto was defined at this location. Each element is a field number or an index. They form a path from the root FileDescriptorProto to the place where the definition. 
For example, this path: [ 4, 3, 2, 7, 1 ] refers to: file.message_type(3) // 4, 3 .field(7) // 2, 7 .name() // 1 This is because FileDescriptorProto.message_type has field number 4: repeated DescriptorProto message_type = 4; and DescriptorProto.field has field number 2: repeated FieldDescriptorProto field = 2; and FieldDescriptorProto.name has field number 1: optional string name = 1; Thus, the above path gives the location of a field name. If we removed the last element: [ 4, 3, 2, 7 ] this path refers to the whole field declaration (from the beginning of the label to the terminating semicolon). | | +| `span` | `[]int` | Always has exactly three or four elements: start line, start column, end line (optional, otherwise assumed same as start line), end column. These are packed into a single field for efficiency. Note that line and column numbers are zero-based -- typically you will want to add 1 to each before displaying to a user. | | +| `leadingComments` | `string` | If this SourceCodeInfo represents a complete declaration, these are any comments appearing before and after the declaration which appear to be attached to the declaration. A series of line comments appearing on consecutive lines, with no other tokens appearing on those lines, will be treated as a single comment. leading_detached_comments will keep paragraphs of comments that appear before (but not connected to) the current element. Each paragraph, separated by empty lines, will be one comment element in the repeated field. Only the comment content is provided; comment markers (e.g. //) are stripped out. For block comments, leading whitespace and an asterisk will be stripped from the beginning of each line other than the first. Newlines are included in the output. Examples: optional int32 foo = 1; // Comment attached to foo. // Comment attached to bar. optional int32 bar = 2; optional string baz = 3; // Comment attached to baz. // Another line attached to baz. // Comment attached to qux. 
// // Another line attached to qux. optional double qux = 4; // Detached comment for corge. This is not leading or trailing comments // to qux or corge because there are blank lines separating it from // both. // Detached comment for corge paragraph 2. optional string corge = 5; /* Block comment attached * to corge. Leading asterisks * will be removed. */ /* Block comment attached to * grault. */ optional int32 grault = 6; // ignored detached comments. | | +| `trailingComments` | `string` | | | +| `leadingDetachedComments` | `[]string` | | | + + + + +--- +### GeneratedCodeInfo + + +Describes the relationship between generated code and its original source +file. A GeneratedCodeInfo message is associated with only one generated +source file, but may contain references to different source .proto files. + +```yaml +"annotation": []google.protobuf.GeneratedCodeInfo.Annotation + +``` + +| Field | Type | Description | Default | +| ----- | ---- | ----------- |----------- | +| `annotation` | [[]google.protobuf.GeneratedCodeInfo.Annotation](descriptor.proto.sk.md#Annotation) | An Annotation connects some span of text in generated code to an element of its generating .proto file. | | + + + + +--- +### Annotation + + + +```yaml +"path": []int +"sourceFile": string +"begin": int +"end": int + +``` + +| Field | Type | Description | Default | +| ----- | ---- | ----------- |----------- | +| `path` | `[]int` | Identifies the element in the original source .proto file. This field is formatted the same as SourceCodeInfo.Location.path. | | +| `sourceFile` | `string` | Identifies the filesystem path to the original source .proto. | | +| `begin` | `int` | Identifies the starting offset in bytes in the generated code that relates to the identified object. | | +| `end` | `int` | Identifies the ending offset in bytes in the generated code that relates to the identified offset. The end offset should be one past the last relevant byte (so the length of the text = end - begin). 
| | + + + + + + + + From c34db6197567734b60e1b172f34d3f511ee6818f Mon Sep 17 00:00:00 2001 From: Joe Kelley Date: Tue, 16 Jul 2019 11:47:35 -0400 Subject: [PATCH 04/17] remove old gen --- .../v1/kubeconfigs_event_loop.sk.go | 93 -- .../v1/kubeconfigs_event_loop_test.go | 63 -- .../v1/kubeconfigs_simple_event_loop.sk.go | 122 --- .../v1/kubeconfigs_snapshot.sk.go | 60 -- .../v1/kubeconfigs_snapshot_emitter.sk.go | 175 ---- .../v1/kubeconfigs_snapshot_emitter_test.go | 202 ----- .../kubeconfigs_snapshot_simple_emitter.sk.go | 100 --- .../google/protobuf/descriptor.proto.sk.md | 825 ------------------ 8 files changed, 1640 deletions(-) delete mode 100644 pkg/multicluster/v1/kubeconfigs_event_loop.sk.go delete mode 100644 pkg/multicluster/v1/kubeconfigs_event_loop_test.go delete mode 100644 pkg/multicluster/v1/kubeconfigs_simple_event_loop.sk.go delete mode 100644 pkg/multicluster/v1/kubeconfigs_snapshot.sk.go delete mode 100644 pkg/multicluster/v1/kubeconfigs_snapshot_emitter.sk.go delete mode 100644 pkg/multicluster/v1/kubeconfigs_snapshot_emitter_test.go delete mode 100644 pkg/multicluster/v1/kubeconfigs_snapshot_simple_emitter.sk.go delete mode 100644 test/mocks/docs/github.com/solo-io/solo-kit/api/external/google/protobuf/descriptor.proto.sk.md diff --git a/pkg/multicluster/v1/kubeconfigs_event_loop.sk.go b/pkg/multicluster/v1/kubeconfigs_event_loop.sk.go deleted file mode 100644 index 994b3229d..000000000 --- a/pkg/multicluster/v1/kubeconfigs_event_loop.sk.go +++ /dev/null @@ -1,93 +0,0 @@ -// Code generated by solo-kit. DO NOT EDIT. 
- -package group - -import ( - "context" - - "go.opencensus.io/trace" - - "github.com/hashicorp/go-multierror" - - "github.com/solo-io/go-utils/contextutils" - "github.com/solo-io/go-utils/errutils" - "github.com/solo-io/solo-kit/pkg/api/v1/clients" - "github.com/solo-io/solo-kit/pkg/api/v1/eventloop" - "github.com/solo-io/solo-kit/pkg/errors" -) - -type KubeconfigsSyncer interface { - Sync(context.Context, *KubeconfigsSnapshot) error -} - -type KubeconfigsSyncers []KubeconfigsSyncer - -func (s KubeconfigsSyncers) Sync(ctx context.Context, snapshot *KubeconfigsSnapshot) error { - var multiErr *multierror.Error - for _, syncer := range s { - if err := syncer.Sync(ctx, snapshot); err != nil { - multiErr = multierror.Append(multiErr, err) - } - } - return multiErr.ErrorOrNil() -} - -type kubeconfigsEventLoop struct { - emitter KubeconfigsEmitter - syncer KubeconfigsSyncer -} - -func NewKubeconfigsEventLoop(emitter KubeconfigsEmitter, syncer KubeconfigsSyncer) eventloop.EventLoop { - return &kubeconfigsEventLoop{ - emitter: emitter, - syncer: syncer, - } -} - -func (el *kubeconfigsEventLoop) Run(namespaces []string, opts clients.WatchOpts) (<-chan error, error) { - opts = opts.WithDefaults() - opts.Ctx = contextutils.WithLogger(opts.Ctx, "group.event_loop") - logger := contextutils.LoggerFrom(opts.Ctx) - logger.Infof("event loop started") - - errs := make(chan error) - - watch, emitterErrs, err := el.emitter.Snapshots(namespaces, opts) - if err != nil { - return nil, errors.Wrapf(err, "starting snapshot watch") - } - go errutils.AggregateErrs(opts.Ctx, errs, emitterErrs, "group.emitter errors") - go func() { - // create a new context for each loop, cancel it before each loop - var cancel context.CancelFunc = func() {} - // use closure to allow cancel function to be updated as context changes - defer func() { cancel() }() - for { - select { - case snapshot, ok := <-watch: - if !ok { - return - } - // cancel any open watches from previous loop - cancel() - - ctx, span := 
trace.StartSpan(opts.Ctx, "kubeconfigs.multicluster.solo.io.EventLoopSync") - ctx, canc := context.WithCancel(ctx) - cancel = canc - err := el.syncer.Sync(ctx, snapshot) - span.End() - - if err != nil { - select { - case errs <- err: - default: - logger.Errorf("write error channel is full! could not propagate err: %v", err) - } - } - case <-opts.Ctx.Done(): - return - } - } - }() - return errs, nil -} diff --git a/pkg/multicluster/v1/kubeconfigs_event_loop_test.go b/pkg/multicluster/v1/kubeconfigs_event_loop_test.go deleted file mode 100644 index 1cc4f430b..000000000 --- a/pkg/multicluster/v1/kubeconfigs_event_loop_test.go +++ /dev/null @@ -1,63 +0,0 @@ -// Code generated by solo-kit. DO NOT EDIT. - -// +build solokit - -package group - -import ( - "context" - "sync" - "time" - - . "github.com/onsi/ginkgo" - . "github.com/onsi/gomega" - "github.com/solo-io/solo-kit/pkg/api/v1/clients" - "github.com/solo-io/solo-kit/pkg/api/v1/clients/factory" - "github.com/solo-io/solo-kit/pkg/api/v1/clients/memory" -) - -var _ = Describe("KubeconfigsEventLoop", func() { - var ( - namespace string - emitter KubeconfigsEmitter - err error - ) - - BeforeEach(func() { - - kubeConfigClientFactory := &factory.MemoryResourceClientFactory{ - Cache: memory.NewInMemoryResourceCache(), - } - kubeConfigClient, err := NewKubeConfigClient(kubeConfigClientFactory) - Expect(err).NotTo(HaveOccurred()) - - emitter = NewKubeconfigsEmitter(kubeConfigClient) - }) - It("runs sync function on a new snapshot", func() { - _, err = emitter.KubeConfig().Write(NewKubeConfig(namespace, "jerry"), clients.WriteOpts{}) - Expect(err).NotTo(HaveOccurred()) - sync := &mockKubeconfigsSyncer{} - el := NewKubeconfigsEventLoop(emitter, sync) - _, err := el.Run([]string{namespace}, clients.WatchOpts{}) - Expect(err).NotTo(HaveOccurred()) - Eventually(sync.Synced, 5*time.Second).Should(BeTrue()) - }) -}) - -type mockKubeconfigsSyncer struct { - synced bool - mutex sync.Mutex -} - -func (s *mockKubeconfigsSyncer) Synced() 
bool { - s.mutex.Lock() - defer s.mutex.Unlock() - return s.synced -} - -func (s *mockKubeconfigsSyncer) Sync(ctx context.Context, snap *KubeconfigsSnapshot) error { - s.mutex.Lock() - s.synced = true - s.mutex.Unlock() - return nil -} diff --git a/pkg/multicluster/v1/kubeconfigs_simple_event_loop.sk.go b/pkg/multicluster/v1/kubeconfigs_simple_event_loop.sk.go deleted file mode 100644 index 46cf4ff4e..000000000 --- a/pkg/multicluster/v1/kubeconfigs_simple_event_loop.sk.go +++ /dev/null @@ -1,122 +0,0 @@ -// Code generated by solo-kit. DO NOT EDIT. - -package group - -import ( - "context" - "fmt" - - "go.opencensus.io/trace" - - "github.com/solo-io/go-utils/contextutils" - "github.com/solo-io/go-utils/errutils" - "github.com/solo-io/solo-kit/pkg/api/v1/eventloop" - "github.com/solo-io/solo-kit/pkg/errors" -) - -// SyncDeciders Syncer which implements this interface -// can make smarter decisions over whether -// it should be restarted (including having its context cancelled) -// based on a diff of the previous and current snapshot - -// Deprecated: use KubeconfigsSyncDeciderWithContext -type KubeconfigsSyncDecider interface { - KubeconfigsSyncer - ShouldSync(old, new *KubeconfigsSnapshot) bool -} - -type KubeconfigsSyncDeciderWithContext interface { - KubeconfigsSyncer - ShouldSync(ctx context.Context, old, new *KubeconfigsSnapshot) bool -} - -type kubeconfigsSimpleEventLoop struct { - emitter KubeconfigsSimpleEmitter - syncers []KubeconfigsSyncer -} - -func NewKubeconfigsSimpleEventLoop(emitter KubeconfigsSimpleEmitter, syncers ...KubeconfigsSyncer) eventloop.SimpleEventLoop { - return &kubeconfigsSimpleEventLoop{ - emitter: emitter, - syncers: syncers, - } -} - -func (el *kubeconfigsSimpleEventLoop) Run(ctx context.Context) (<-chan error, error) { - ctx = contextutils.WithLogger(ctx, "group.event_loop") - logger := contextutils.LoggerFrom(ctx) - logger.Infof("event loop started") - - errs := make(chan error) - - watch, emitterErrs, err := el.emitter.Snapshots(ctx) 
- if err != nil { - return nil, errors.Wrapf(err, "starting snapshot watch") - } - - go errutils.AggregateErrs(ctx, errs, emitterErrs, "group.emitter errors") - go func() { - // create a new context for each syncer for each loop, cancel each before each loop - syncerCancels := make(map[KubeconfigsSyncer]context.CancelFunc) - - // use closure to allow cancel function to be updated as context changes - defer func() { - for _, cancel := range syncerCancels { - cancel() - } - }() - - // cache the previous snapshot for comparison - var previousSnapshot *KubeconfigsSnapshot - - for { - select { - case snapshot, ok := <-watch: - if !ok { - return - } - - // cancel any open watches from previous loop - for _, syncer := range el.syncers { - // allow the syncer to decide if we should sync it + cancel its previous context - if syncDecider, isDecider := syncer.(KubeconfigsSyncDecider); isDecider { - if shouldSync := syncDecider.ShouldSync(previousSnapshot, snapshot); !shouldSync { - continue // skip syncing this syncer - } - } else if syncDeciderWithContext, isDecider := syncer.(KubeconfigsSyncDeciderWithContext); isDecider { - if shouldSync := syncDeciderWithContext.ShouldSync(ctx, previousSnapshot, snapshot); !shouldSync { - continue // skip syncing this syncer - } - } - - // if this syncer had a previous context, cancel it - cancel, ok := syncerCancels[syncer] - if ok { - cancel() - } - - ctx, span := trace.StartSpan(ctx, fmt.Sprintf("kubeconfigs.multicluster.solo.io.SimpleEventLoopSync-%T", syncer)) - ctx, canc := context.WithCancel(ctx) - err := syncer.Sync(ctx, snapshot) - span.End() - - if err != nil { - select { - case errs <- err: - default: - logger.Errorf("write error channel is full! 
could not propagate err: %v", err) - } - } - - syncerCancels[syncer] = canc - } - - previousSnapshot = snapshot - - case <-ctx.Done(): - return - } - } - }() - return errs, nil -} diff --git a/pkg/multicluster/v1/kubeconfigs_snapshot.sk.go b/pkg/multicluster/v1/kubeconfigs_snapshot.sk.go deleted file mode 100644 index 7ef8d190b..000000000 --- a/pkg/multicluster/v1/kubeconfigs_snapshot.sk.go +++ /dev/null @@ -1,60 +0,0 @@ -// Code generated by solo-kit. DO NOT EDIT. - -package group - -import ( - "fmt" - - "github.com/solo-io/go-utils/hashutils" - "go.uber.org/zap" -) - -type KubeconfigsSnapshot struct { - Kubeconfigs KubeConfigList -} - -func (s KubeconfigsSnapshot) Clone() KubeconfigsSnapshot { - return KubeconfigsSnapshot{ - Kubeconfigs: s.Kubeconfigs.Clone(), - } -} - -func (s KubeconfigsSnapshot) Hash() uint64 { - return hashutils.HashAll( - s.hashKubeconfigs(), - ) -} - -func (s KubeconfigsSnapshot) hashKubeconfigs() uint64 { - return hashutils.HashAll(s.Kubeconfigs.AsInterfaces()...) 
-} - -func (s KubeconfigsSnapshot) HashFields() []zap.Field { - var fields []zap.Field - fields = append(fields, zap.Uint64("kubeconfigs", s.hashKubeconfigs())) - - return append(fields, zap.Uint64("snapshotHash", s.Hash())) -} - -type KubeconfigsSnapshotStringer struct { - Version uint64 - Kubeconfigs []string -} - -func (ss KubeconfigsSnapshotStringer) String() string { - s := fmt.Sprintf("KubeconfigsSnapshot %v\n", ss.Version) - - s += fmt.Sprintf(" Kubeconfigs %v\n", len(ss.Kubeconfigs)) - for _, name := range ss.Kubeconfigs { - s += fmt.Sprintf(" %v\n", name) - } - - return s -} - -func (s KubeconfigsSnapshot) Stringer() KubeconfigsSnapshotStringer { - return KubeconfigsSnapshotStringer{ - Version: s.Hash(), - Kubeconfigs: s.Kubeconfigs.NamespacesDotNames(), - } -} diff --git a/pkg/multicluster/v1/kubeconfigs_snapshot_emitter.sk.go b/pkg/multicluster/v1/kubeconfigs_snapshot_emitter.sk.go deleted file mode 100644 index 21e331797..000000000 --- a/pkg/multicluster/v1/kubeconfigs_snapshot_emitter.sk.go +++ /dev/null @@ -1,175 +0,0 @@ -// Code generated by solo-kit. DO NOT EDIT. 
- -package group - -import ( - "sync" - "time" - - "go.opencensus.io/stats" - "go.opencensus.io/stats/view" - "go.opencensus.io/tag" - - "github.com/solo-io/go-utils/errutils" - "github.com/solo-io/solo-kit/pkg/api/v1/clients" - "github.com/solo-io/solo-kit/pkg/errors" -) - -var ( - mKubeconfigsSnapshotIn = stats.Int64("kubeconfigs.multicluster.solo.io/snap_emitter/snap_in", "The number of snapshots in", "1") - mKubeconfigsSnapshotOut = stats.Int64("kubeconfigs.multicluster.solo.io/snap_emitter/snap_out", "The number of snapshots out", "1") - - kubeconfigssnapshotInView = &view.View{ - Name: "kubeconfigs.multicluster.solo.io_snap_emitter/snap_in", - Measure: mKubeconfigsSnapshotIn, - Description: "The number of snapshots updates coming in", - Aggregation: view.Count(), - TagKeys: []tag.Key{}, - } - kubeconfigssnapshotOutView = &view.View{ - Name: "kubeconfigs.multicluster.solo.io/snap_emitter/snap_out", - Measure: mKubeconfigsSnapshotOut, - Description: "The number of snapshots updates going out", - Aggregation: view.Count(), - TagKeys: []tag.Key{}, - } -) - -func init() { - view.Register(kubeconfigssnapshotInView, kubeconfigssnapshotOutView) -} - -type KubeconfigsEmitter interface { - Register() error - KubeConfig() KubeConfigClient - Snapshots(watchNamespaces []string, opts clients.WatchOpts) (<-chan *KubeconfigsSnapshot, <-chan error, error) -} - -func NewKubeconfigsEmitter(kubeConfigClient KubeConfigClient) KubeconfigsEmitter { - return NewKubeconfigsEmitterWithEmit(kubeConfigClient, make(chan struct{})) -} - -func NewKubeconfigsEmitterWithEmit(kubeConfigClient KubeConfigClient, emit <-chan struct{}) KubeconfigsEmitter { - return &kubeconfigsEmitter{ - kubeConfig: kubeConfigClient, - forceEmit: emit, - } -} - -type kubeconfigsEmitter struct { - forceEmit <-chan struct{} - kubeConfig KubeConfigClient -} - -func (c *kubeconfigsEmitter) Register() error { - if err := c.kubeConfig.Register(); err != nil { - return err - } - return nil -} - -func (c 
*kubeconfigsEmitter) KubeConfig() KubeConfigClient { - return c.kubeConfig -} - -func (c *kubeconfigsEmitter) Snapshots(watchNamespaces []string, opts clients.WatchOpts) (<-chan *KubeconfigsSnapshot, <-chan error, error) { - - if len(watchNamespaces) == 0 { - watchNamespaces = []string{""} - } - - for _, ns := range watchNamespaces { - if ns == "" && len(watchNamespaces) > 1 { - return nil, nil, errors.Errorf("the \"\" namespace is used to watch all namespaces. Snapshots can either be tracked for " + - "specific namespaces or \"\" AllNamespaces, but not both.") - } - } - - errs := make(chan error) - var done sync.WaitGroup - ctx := opts.Ctx - /* Create channel for KubeConfig */ - type kubeConfigListWithNamespace struct { - list KubeConfigList - namespace string - } - kubeConfigChan := make(chan kubeConfigListWithNamespace) - - for _, namespace := range watchNamespaces { - /* Setup namespaced watch for KubeConfig */ - kubeConfigNamespacesChan, kubeConfigErrs, err := c.kubeConfig.Watch(namespace, opts) - if err != nil { - return nil, nil, errors.Wrapf(err, "starting KubeConfig watch") - } - - done.Add(1) - go func(namespace string) { - defer done.Done() - errutils.AggregateErrs(ctx, errs, kubeConfigErrs, namespace+"-kubeconfigs") - }(namespace) - - /* Watch for changes and update snapshot */ - go func(namespace string) { - for { - select { - case <-ctx.Done(): - return - case kubeConfigList := <-kubeConfigNamespacesChan: - select { - case <-ctx.Done(): - return - case kubeConfigChan <- kubeConfigListWithNamespace{list: kubeConfigList, namespace: namespace}: - } - } - } - }(namespace) - } - - snapshots := make(chan *KubeconfigsSnapshot) - go func() { - originalSnapshot := KubeconfigsSnapshot{} - currentSnapshot := originalSnapshot.Clone() - timer := time.NewTicker(time.Second * 1) - sync := func() { - if originalSnapshot.Hash() == currentSnapshot.Hash() { - return - } - - stats.Record(ctx, mKubeconfigsSnapshotOut.M(1)) - originalSnapshot = currentSnapshot.Clone() - 
sentSnapshot := currentSnapshot.Clone() - snapshots <- &sentSnapshot - } - kubeconfigsByNamespace := make(map[string]KubeConfigList) - - for { - record := func() { stats.Record(ctx, mKubeconfigsSnapshotIn.M(1)) } - - select { - case <-timer.C: - sync() - case <-ctx.Done(): - close(snapshots) - done.Wait() - close(errs) - return - case <-c.forceEmit: - sentSnapshot := currentSnapshot.Clone() - snapshots <- &sentSnapshot - case kubeConfigNamespacedList := <-kubeConfigChan: - record() - - namespace := kubeConfigNamespacedList.namespace - - // merge lists by namespace - kubeconfigsByNamespace[namespace] = kubeConfigNamespacedList.list - var kubeConfigList KubeConfigList - for _, kubeconfigs := range kubeconfigsByNamespace { - kubeConfigList = append(kubeConfigList, kubeconfigs...) - } - currentSnapshot.Kubeconfigs = kubeConfigList.Sort() - } - } - }() - return snapshots, errs, nil -} diff --git a/pkg/multicluster/v1/kubeconfigs_snapshot_emitter_test.go b/pkg/multicluster/v1/kubeconfigs_snapshot_emitter_test.go deleted file mode 100644 index a28c005d4..000000000 --- a/pkg/multicluster/v1/kubeconfigs_snapshot_emitter_test.go +++ /dev/null @@ -1,202 +0,0 @@ -// Code generated by solo-kit. DO NOT EDIT. - -// +build solokit - -package group - -import ( - "context" - "os" - "time" - - . "github.com/onsi/ginkgo" - . 
"github.com/onsi/gomega" - "github.com/solo-io/go-utils/kubeutils" - "github.com/solo-io/go-utils/log" - "github.com/solo-io/solo-kit/pkg/api/v1/clients" - "github.com/solo-io/solo-kit/pkg/api/v1/clients/factory" - "github.com/solo-io/solo-kit/pkg/api/v1/clients/memory" - "github.com/solo-io/solo-kit/test/helpers" - "k8s.io/client-go/kubernetes" - - // Needed to run tests in GKE - _ "k8s.io/client-go/plugin/pkg/client/auth/gcp" - - // From https://github.com/kubernetes/client-go/blob/53c7adfd0294caa142d961e1f780f74081d5b15f/examples/out-of-cluster-client-configuration/main.go#L31 - _ "k8s.io/client-go/plugin/pkg/client/auth/gcp" -) - -var _ = Describe("GroupEmitter", func() { - if os.Getenv("RUN_KUBE_TESTS") != "1" { - log.Printf("This test creates kubernetes resources and is disabled by default. To enable, set RUN_KUBE_TESTS=1 in your env.") - return - } - var ( - namespace1 string - namespace2 string - name1, name2 = "angela" + helpers.RandString(3), "bob" + helpers.RandString(3) - kube kubernetes.Interface - emitter KubeconfigsEmitter - kubeConfigClient KubeConfigClient - ) - - BeforeEach(func() { - namespace1 = helpers.RandString(8) - namespace2 = helpers.RandString(8) - kube = helpers.MustKubeClient() - err := kubeutils.CreateNamespacesInParallel(kube, namespace1, namespace2) - Expect(err).NotTo(HaveOccurred()) - // KubeConfig Constructor - kubeConfigClientFactory := &factory.MemoryResourceClientFactory{ - Cache: memory.NewInMemoryResourceCache(), - } - - kubeConfigClient, err = NewKubeConfigClient(kubeConfigClientFactory) - Expect(err).NotTo(HaveOccurred()) - emitter = NewKubeconfigsEmitter(kubeConfigClient) - }) - AfterEach(func() { - err := kubeutils.DeleteNamespacesInParallelBlocking(kube, namespace1, namespace2) - Expect(err).NotTo(HaveOccurred()) - }) - It("tracks snapshots on changes to any resource", func() { - ctx := context.Background() - err := emitter.Register() - Expect(err).NotTo(HaveOccurred()) - - snapshots, errs, err := 
emitter.Snapshots([]string{namespace1, namespace2}, clients.WatchOpts{ - Ctx: ctx, - RefreshRate: time.Second, - }) - Expect(err).NotTo(HaveOccurred()) - - var snap *KubeconfigsSnapshot - - /* - KubeConfig - */ - - assertSnapshotkubeconfigs := func(expectkubeconfigs KubeConfigList, unexpectkubeconfigs KubeConfigList) { - drain: - for { - select { - case snap = <-snapshots: - for _, expected := range expectkubeconfigs { - if _, err := snap.Kubeconfigs.Find(expected.GetMetadata().Ref().Strings()); err != nil { - continue drain - } - } - for _, unexpected := range unexpectkubeconfigs { - if _, err := snap.Kubeconfigs.Find(unexpected.GetMetadata().Ref().Strings()); err == nil { - continue drain - } - } - break drain - case err := <-errs: - Expect(err).NotTo(HaveOccurred()) - case <-time.After(time.Second * 10): - nsList1, _ := kubeConfigClient.List(namespace1, clients.ListOpts{}) - nsList2, _ := kubeConfigClient.List(namespace2, clients.ListOpts{}) - combined := append(nsList1, nsList2...) - Fail("expected final snapshot before 10 seconds. 
expected " + log.Sprintf("%v", combined)) - } - } - } - kubeConfig1a, err := kubeConfigClient.Write(NewKubeConfig(namespace1, name1), clients.WriteOpts{Ctx: ctx}) - Expect(err).NotTo(HaveOccurred()) - kubeConfig1b, err := kubeConfigClient.Write(NewKubeConfig(namespace2, name1), clients.WriteOpts{Ctx: ctx}) - Expect(err).NotTo(HaveOccurred()) - - assertSnapshotkubeconfigs(KubeConfigList{kubeConfig1a, kubeConfig1b}, nil) - kubeConfig2a, err := kubeConfigClient.Write(NewKubeConfig(namespace1, name2), clients.WriteOpts{Ctx: ctx}) - Expect(err).NotTo(HaveOccurred()) - kubeConfig2b, err := kubeConfigClient.Write(NewKubeConfig(namespace2, name2), clients.WriteOpts{Ctx: ctx}) - Expect(err).NotTo(HaveOccurred()) - - assertSnapshotkubeconfigs(KubeConfigList{kubeConfig1a, kubeConfig1b, kubeConfig2a, kubeConfig2b}, nil) - - err = kubeConfigClient.Delete(kubeConfig2a.GetMetadata().Namespace, kubeConfig2a.GetMetadata().Name, clients.DeleteOpts{Ctx: ctx}) - Expect(err).NotTo(HaveOccurred()) - err = kubeConfigClient.Delete(kubeConfig2b.GetMetadata().Namespace, kubeConfig2b.GetMetadata().Name, clients.DeleteOpts{Ctx: ctx}) - Expect(err).NotTo(HaveOccurred()) - - assertSnapshotkubeconfigs(KubeConfigList{kubeConfig1a, kubeConfig1b}, KubeConfigList{kubeConfig2a, kubeConfig2b}) - - err = kubeConfigClient.Delete(kubeConfig1a.GetMetadata().Namespace, kubeConfig1a.GetMetadata().Name, clients.DeleteOpts{Ctx: ctx}) - Expect(err).NotTo(HaveOccurred()) - err = kubeConfigClient.Delete(kubeConfig1b.GetMetadata().Namespace, kubeConfig1b.GetMetadata().Name, clients.DeleteOpts{Ctx: ctx}) - Expect(err).NotTo(HaveOccurred()) - - assertSnapshotkubeconfigs(nil, KubeConfigList{kubeConfig1a, kubeConfig1b, kubeConfig2a, kubeConfig2b}) - }) - It("tracks snapshots on changes to any resource using AllNamespace", func() { - ctx := context.Background() - err := emitter.Register() - Expect(err).NotTo(HaveOccurred()) - - snapshots, errs, err := emitter.Snapshots([]string{""}, clients.WatchOpts{ - Ctx: ctx, - 
RefreshRate: time.Second, - }) - Expect(err).NotTo(HaveOccurred()) - - var snap *KubeconfigsSnapshot - - /* - KubeConfig - */ - - assertSnapshotkubeconfigs := func(expectkubeconfigs KubeConfigList, unexpectkubeconfigs KubeConfigList) { - drain: - for { - select { - case snap = <-snapshots: - for _, expected := range expectkubeconfigs { - if _, err := snap.Kubeconfigs.Find(expected.GetMetadata().Ref().Strings()); err != nil { - continue drain - } - } - for _, unexpected := range unexpectkubeconfigs { - if _, err := snap.Kubeconfigs.Find(unexpected.GetMetadata().Ref().Strings()); err == nil { - continue drain - } - } - break drain - case err := <-errs: - Expect(err).NotTo(HaveOccurred()) - case <-time.After(time.Second * 10): - nsList1, _ := kubeConfigClient.List(namespace1, clients.ListOpts{}) - nsList2, _ := kubeConfigClient.List(namespace2, clients.ListOpts{}) - combined := append(nsList1, nsList2...) - Fail("expected final snapshot before 10 seconds. expected " + log.Sprintf("%v", combined)) - } - } - } - kubeConfig1a, err := kubeConfigClient.Write(NewKubeConfig(namespace1, name1), clients.WriteOpts{Ctx: ctx}) - Expect(err).NotTo(HaveOccurred()) - kubeConfig1b, err := kubeConfigClient.Write(NewKubeConfig(namespace2, name1), clients.WriteOpts{Ctx: ctx}) - Expect(err).NotTo(HaveOccurred()) - - assertSnapshotkubeconfigs(KubeConfigList{kubeConfig1a, kubeConfig1b}, nil) - kubeConfig2a, err := kubeConfigClient.Write(NewKubeConfig(namespace1, name2), clients.WriteOpts{Ctx: ctx}) - Expect(err).NotTo(HaveOccurred()) - kubeConfig2b, err := kubeConfigClient.Write(NewKubeConfig(namespace2, name2), clients.WriteOpts{Ctx: ctx}) - Expect(err).NotTo(HaveOccurred()) - - assertSnapshotkubeconfigs(KubeConfigList{kubeConfig1a, kubeConfig1b, kubeConfig2a, kubeConfig2b}, nil) - - err = kubeConfigClient.Delete(kubeConfig2a.GetMetadata().Namespace, kubeConfig2a.GetMetadata().Name, clients.DeleteOpts{Ctx: ctx}) - Expect(err).NotTo(HaveOccurred()) - err = 
kubeConfigClient.Delete(kubeConfig2b.GetMetadata().Namespace, kubeConfig2b.GetMetadata().Name, clients.DeleteOpts{Ctx: ctx}) - Expect(err).NotTo(HaveOccurred()) - - assertSnapshotkubeconfigs(KubeConfigList{kubeConfig1a, kubeConfig1b}, KubeConfigList{kubeConfig2a, kubeConfig2b}) - - err = kubeConfigClient.Delete(kubeConfig1a.GetMetadata().Namespace, kubeConfig1a.GetMetadata().Name, clients.DeleteOpts{Ctx: ctx}) - Expect(err).NotTo(HaveOccurred()) - err = kubeConfigClient.Delete(kubeConfig1b.GetMetadata().Namespace, kubeConfig1b.GetMetadata().Name, clients.DeleteOpts{Ctx: ctx}) - Expect(err).NotTo(HaveOccurred()) - - assertSnapshotkubeconfigs(nil, KubeConfigList{kubeConfig1a, kubeConfig1b, kubeConfig2a, kubeConfig2b}) - }) -}) diff --git a/pkg/multicluster/v1/kubeconfigs_snapshot_simple_emitter.sk.go b/pkg/multicluster/v1/kubeconfigs_snapshot_simple_emitter.sk.go deleted file mode 100644 index 2905a88b6..000000000 --- a/pkg/multicluster/v1/kubeconfigs_snapshot_simple_emitter.sk.go +++ /dev/null @@ -1,100 +0,0 @@ -// Code generated by solo-kit. DO NOT EDIT. 
- -package group - -import ( - "context" - "fmt" - "time" - - "go.opencensus.io/stats" - - "github.com/solo-io/go-utils/errutils" - "github.com/solo-io/solo-kit/pkg/api/v1/clients" -) - -type KubeconfigsSimpleEmitter interface { - Snapshots(ctx context.Context) (<-chan *KubeconfigsSnapshot, <-chan error, error) -} - -func NewKubeconfigsSimpleEmitter(aggregatedWatch clients.ResourceWatch) KubeconfigsSimpleEmitter { - return NewKubeconfigsSimpleEmitterWithEmit(aggregatedWatch, make(chan struct{})) -} - -func NewKubeconfigsSimpleEmitterWithEmit(aggregatedWatch clients.ResourceWatch, emit <-chan struct{}) KubeconfigsSimpleEmitter { - return &kubeconfigsSimpleEmitter{ - aggregatedWatch: aggregatedWatch, - forceEmit: emit, - } -} - -type kubeconfigsSimpleEmitter struct { - forceEmit <-chan struct{} - aggregatedWatch clients.ResourceWatch -} - -func (c *kubeconfigsSimpleEmitter) Snapshots(ctx context.Context) (<-chan *KubeconfigsSnapshot, <-chan error, error) { - snapshots := make(chan *KubeconfigsSnapshot) - errs := make(chan error) - - untyped, watchErrs, err := c.aggregatedWatch(ctx) - if err != nil { - return nil, nil, err - } - - go errutils.AggregateErrs(ctx, errs, watchErrs, "kubeconfigs-emitter") - - go func() { - originalSnapshot := KubeconfigsSnapshot{} - currentSnapshot := originalSnapshot.Clone() - timer := time.NewTicker(time.Second * 1) - sync := func() { - if originalSnapshot.Hash() == currentSnapshot.Hash() { - return - } - - stats.Record(ctx, mKubeconfigsSnapshotOut.M(1)) - originalSnapshot = currentSnapshot.Clone() - sentSnapshot := currentSnapshot.Clone() - snapshots <- &sentSnapshot - } - - defer func() { - close(snapshots) - close(errs) - }() - - for { - record := func() { stats.Record(ctx, mKubeconfigsSnapshotIn.M(1)) } - - select { - case <-timer.C: - sync() - case <-ctx.Done(): - return - case <-c.forceEmit: - sentSnapshot := currentSnapshot.Clone() - snapshots <- &sentSnapshot - case untypedList := <-untyped: - record() - - currentSnapshot = 
KubeconfigsSnapshot{} - for _, res := range untypedList { - switch typed := res.(type) { - case *KubeConfig: - currentSnapshot.Kubeconfigs = append(currentSnapshot.Kubeconfigs, typed) - default: - select { - case errs <- fmt.Errorf("KubeconfigsSnapshotEmitter "+ - "cannot process resource %v of type %T", res.GetMetadata().Ref(), res): - case <-ctx.Done(): - return - } - } - } - - } - } - }() - return snapshots, errs, nil -} diff --git a/test/mocks/docs/github.com/solo-io/solo-kit/api/external/google/protobuf/descriptor.proto.sk.md b/test/mocks/docs/github.com/solo-io/solo-kit/api/external/google/protobuf/descriptor.proto.sk.md deleted file mode 100644 index a4f7351ac..000000000 --- a/test/mocks/docs/github.com/solo-io/solo-kit/api/external/google/protobuf/descriptor.proto.sk.md +++ /dev/null @@ -1,825 +0,0 @@ - - -### Package: `google.protobuf` -Protocol Buffers - Google's data interchange format -Copyright 2008 Google Inc. All rights reserved. -https://developers.google.com/protocol-buffers/ - -Redistribution and use in source and binary forms, with or without -modification, are permitted provided that the following conditions are -met: - - * Redistributions of source code must retain the above copyright -notice, this list of conditions and the following disclaimer. - * Redistributions in binary form must reproduce the above -copyright notice, this list of conditions and the following disclaimer -in the documentation and/or other materials provided with the -distribution. - * Neither the name of Google Inc. nor the names of its -contributors may be used to endorse or promote products derived from -this software without specific prior written permission. - -THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS -"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT -LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR -A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT -OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, -SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT -LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, -DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY -THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. -Author: kenton@google.com (Kenton Varda) - Based on original Protocol Buffers design by - Sanjay Ghemawat, Jeff Dean, and others. - -The messages in this file describe the definitions found in .proto files. -A valid .proto file can be translated directly to a FileDescriptorProto -without any other information (e.g. without reading its imports). - - - -#### Types: - - -- [FileDescriptorSet](#FileDescriptorSet) -- [FileDescriptorProto](#FileDescriptorProto) -- [DescriptorProto](#DescriptorProto) -- [ExtensionRange](#ExtensionRange) -- [ReservedRange](#ReservedRange) -- [FieldDescriptorProto](#FieldDescriptorProto) -- [Type](#Type) -- [Label](#Label) -- [OneofDescriptorProto](#OneofDescriptorProto) -- [EnumDescriptorProto](#EnumDescriptorProto) -- [EnumValueDescriptorProto](#EnumValueDescriptorProto) -- [ServiceDescriptorProto](#ServiceDescriptorProto) -- [MethodDescriptorProto](#MethodDescriptorProto) -- [FileOptions](#FileOptions) -- [OptimizeMode](#OptimizeMode) -- [MessageOptions](#MessageOptions) -- [FieldOptions](#FieldOptions) -- [CType](#CType) -- [JSType](#JSType) -- [OneofOptions](#OneofOptions) -- [EnumOptions](#EnumOptions) -- [EnumValueOptions](#EnumValueOptions) -- [ServiceOptions](#ServiceOptions) -- [MethodOptions](#MethodOptions) -- [IdempotencyLevel](#IdempotencyLevel) -- [UninterpretedOption](#UninterpretedOption) -- [NamePart](#NamePart) -- [SourceCodeInfo](#SourceCodeInfo) -- [Location](#Location) -- 
[GeneratedCodeInfo](#GeneratedCodeInfo) -- [Annotation](#Annotation) - - - - -##### Source File: [github.com/solo-io/solo-kit/api/external/google/protobuf/descriptor.proto](https://github.com/solo-io/solo-kit/blob/master/api/external/google/protobuf/descriptor.proto) - - - - - ---- -### FileDescriptorSet - - -The protocol compiler can output a FileDescriptorSet containing the .proto -files it parses. - -```yaml -"file": []google.protobuf.FileDescriptorProto - -``` - -| Field | Type | Description | Default | -| ----- | ---- | ----------- |----------- | -| `file` | [[]google.protobuf.FileDescriptorProto](descriptor.proto.sk.md#FileDescriptorProto) | | | - - - - ---- -### FileDescriptorProto - - -Describes a complete .proto file. - -```yaml -"name": string -"package": string -"dependency": []string -"publicDependency": []int -"weakDependency": []int -"messageType": []google.protobuf.DescriptorProto -"enumType": []google.protobuf.EnumDescriptorProto -"service": []google.protobuf.ServiceDescriptorProto -"extension": []google.protobuf.FieldDescriptorProto -"options": .google.protobuf.FileOptions -"sourceCodeInfo": .google.protobuf.SourceCodeInfo -"syntax": string - -``` - -| Field | Type | Description | Default | -| ----- | ---- | ----------- |----------- | -| `name` | `string` | | | -| `package` | `string` | | | -| `dependency` | `[]string` | Names of files imported by this file. | | -| `publicDependency` | `[]int` | Indexes of the public imported files in the dependency list above. | | -| `weakDependency` | `[]int` | Indexes of the weak imported files in the dependency list. For Google-internal migration only. Do not use. | | -| `messageType` | [[]google.protobuf.DescriptorProto](descriptor.proto.sk.md#DescriptorProto) | All top-level definitions in this file. 
| | -| `enumType` | [[]google.protobuf.EnumDescriptorProto](descriptor.proto.sk.md#EnumDescriptorProto) | | | -| `service` | [[]google.protobuf.ServiceDescriptorProto](descriptor.proto.sk.md#ServiceDescriptorProto) | | | -| `extension` | [[]google.protobuf.FieldDescriptorProto](descriptor.proto.sk.md#FieldDescriptorProto) | | | -| `options` | [.google.protobuf.FileOptions](https://developers.google.com/protocol-buffers/docs/reference/csharp/class/google/protobuf/well-known-types/file-options) | | | -| `sourceCodeInfo` | [.google.protobuf.SourceCodeInfo](https://developers.google.com/protocol-buffers/docs/reference/csharp/class/google/protobuf/well-known-types/source-code-info) | This field contains optional information about the original source code. You may safely remove this entire field without harming runtime functionality of the descriptors -- the information is needed only by development tools. | | -| `syntax` | `string` | The syntax of the proto file. The supported values are "proto2" and "proto3". | | - - - - ---- -### DescriptorProto - - -Describes a message type. 
- -```yaml -"name": string -"field": []google.protobuf.FieldDescriptorProto -"extension": []google.protobuf.FieldDescriptorProto -"nestedType": []google.protobuf.DescriptorProto -"enumType": []google.protobuf.EnumDescriptorProto -"extensionRange": []google.protobuf.DescriptorProto.ExtensionRange -"oneofDecl": []google.protobuf.OneofDescriptorProto -"options": .google.protobuf.MessageOptions -"reservedRange": []google.protobuf.DescriptorProto.ReservedRange -"reservedName": []string - -``` - -| Field | Type | Description | Default | -| ----- | ---- | ----------- |----------- | -| `name` | `string` | | | -| `field` | [[]google.protobuf.FieldDescriptorProto](descriptor.proto.sk.md#FieldDescriptorProto) | | | -| `extension` | [[]google.protobuf.FieldDescriptorProto](descriptor.proto.sk.md#FieldDescriptorProto) | | | -| `nestedType` | [[]google.protobuf.DescriptorProto](descriptor.proto.sk.md#DescriptorProto) | | | -| `enumType` | [[]google.protobuf.EnumDescriptorProto](descriptor.proto.sk.md#EnumDescriptorProto) | | | -| `extensionRange` | [[]google.protobuf.DescriptorProto.ExtensionRange](descriptor.proto.sk.md#ExtensionRange) | | | -| `oneofDecl` | [[]google.protobuf.OneofDescriptorProto](descriptor.proto.sk.md#OneofDescriptorProto) | | | -| `options` | [.google.protobuf.MessageOptions](https://developers.google.com/protocol-buffers/docs/reference/csharp/class/google/protobuf/well-known-types/message-options) | | | -| `reservedRange` | [[]google.protobuf.DescriptorProto.ReservedRange](descriptor.proto.sk.md#ReservedRange) | | | -| `reservedName` | `[]string` | Reserved field names, which may not be used by fields in the same message. A given name may only be reserved once. | | - - - - ---- -### ExtensionRange - - - -```yaml -"start": int -"end": int - -``` - -| Field | Type | Description | Default | -| ----- | ---- | ----------- |----------- | -| `start` | `int` | | | -| `end` | `int` | | | - - - - ---- -### ReservedRange - - -Range of reserved tag numbers. 
Reserved tag numbers may not be used by -fields or extension ranges in the same message. Reserved ranges may -not overlap. - -```yaml -"start": int -"end": int - -``` - -| Field | Type | Description | Default | -| ----- | ---- | ----------- |----------- | -| `start` | `int` | | | -| `end` | `int` | | | - - - - ---- -### FieldDescriptorProto - - -Describes a field within a message. - -```yaml -"name": string -"number": int -"label": .google.protobuf.FieldDescriptorProto.Label -"type": .google.protobuf.FieldDescriptorProto.Type -"typeName": string -"extendee": string -"defaultValue": string -"oneofIndex": int -"jsonName": string -"options": .google.protobuf.FieldOptions - -``` - -| Field | Type | Description | Default | -| ----- | ---- | ----------- |----------- | -| `name` | `string` | | | -| `number` | `int` | | | -| `label` | [.google.protobuf.FieldDescriptorProto.Label](https://developers.google.com/protocol-buffers/docs/reference/csharp/class/google/protobuf/well-known-types/field-descriptor-proto.-label) | | | -| `type` | [.google.protobuf.FieldDescriptorProto.Type](https://developers.google.com/protocol-buffers/docs/reference/csharp/class/google/protobuf/well-known-types/field-descriptor-proto.-type) | If type_name is set, this need not be set. If both this and type_name are set, this must be one of TYPE_ENUM, TYPE_MESSAGE or TYPE_GROUP. | | -| `typeName` | `string` | For message and enum types, this is the name of the type. If the name starts with a '.', it is fully-qualified. Otherwise, C++-like scoping rules are used to find the type (i.e. first the nested types within this message are searched, then within the parent, on up to the root namespace). | | -| `extendee` | `string` | For extensions, this is the name of the type being extended. It is resolved in the same manner as type_name. | | -| `defaultValue` | `string` | For numeric types, contains the original text representation of the value. For booleans, "true" or "false". 
For strings, contains the default text contents (not escaped in any way). For bytes, contains the C escaped value. All bytes >= 128 are escaped. TODO(kenton): Base-64 encode? | | -| `oneofIndex` | `int` | If set, gives the index of a oneof in the containing type's oneof_decl list. This field is a member of that oneof. | | -| `jsonName` | `string` | JSON name of this field. The value is set by protocol compiler. If the user has set a "json_name" option on this field, that option's value will be used. Otherwise, it's deduced from the field's name by converting it to camelCase. | | -| `options` | [.google.protobuf.FieldOptions](https://developers.google.com/protocol-buffers/docs/reference/csharp/class/google/protobuf/well-known-types/field-options) | | | - - - - ---- -### Type - - - -| Name | Description | -| ----- | ----------- | -| `TYPE_DOUBLE` | 0 is reserved for errors. Order is weird for historical reasons. | -| `TYPE_FLOAT` | | -| `TYPE_INT64` | Not ZigZag encoded. Negative numbers take 10 bytes. Use TYPE_SINT64 if negative values are likely. | -| `TYPE_UINT64` | | -| `TYPE_INT32` | Not ZigZag encoded. Negative numbers take 10 bytes. Use TYPE_SINT32 if negative values are likely. | -| `TYPE_FIXED64` | | -| `TYPE_FIXED32` | | -| `TYPE_BOOL` | | -| `TYPE_STRING` | | -| `TYPE_GROUP` | Tag-delimited aggregate. Group type is deprecated and not supported in proto3. However, Proto3 implementations should still be able to parse the group wire format and treat group fields as unknown fields. | -| `TYPE_MESSAGE` | | -| `TYPE_BYTES` | New in version 2. | -| `TYPE_UINT32` | | -| `TYPE_ENUM` | | -| `TYPE_SFIXED32` | | -| `TYPE_SFIXED64` | | -| `TYPE_SINT32` | | -| `TYPE_SINT64` | | - - - - ---- -### Label - - - -| Name | Description | -| ----- | ----------- | -| `LABEL_OPTIONAL` | 0 is reserved for errors | -| `LABEL_REQUIRED` | | -| `LABEL_REPEATED` | | - - - - ---- -### OneofDescriptorProto - - -Describes a oneof. 
- -```yaml -"name": string -"options": .google.protobuf.OneofOptions - -``` - -| Field | Type | Description | Default | -| ----- | ---- | ----------- |----------- | -| `name` | `string` | | | -| `options` | [.google.protobuf.OneofOptions](https://developers.google.com/protocol-buffers/docs/reference/csharp/class/google/protobuf/well-known-types/oneof-options) | | | - - - - ---- -### EnumDescriptorProto - - -Describes an enum type. - -```yaml -"name": string -"value": []google.protobuf.EnumValueDescriptorProto -"options": .google.protobuf.EnumOptions - -``` - -| Field | Type | Description | Default | -| ----- | ---- | ----------- |----------- | -| `name` | `string` | | | -| `value` | [[]google.protobuf.EnumValueDescriptorProto](descriptor.proto.sk.md#EnumValueDescriptorProto) | | | -| `options` | [.google.protobuf.EnumOptions](https://developers.google.com/protocol-buffers/docs/reference/csharp/class/google/protobuf/well-known-types/enum-options) | | | - - - - ---- -### EnumValueDescriptorProto - - -Describes a value within an enum. - -```yaml -"name": string -"number": int -"options": .google.protobuf.EnumValueOptions - -``` - -| Field | Type | Description | Default | -| ----- | ---- | ----------- |----------- | -| `name` | `string` | | | -| `number` | `int` | | | -| `options` | [.google.protobuf.EnumValueOptions](https://developers.google.com/protocol-buffers/docs/reference/csharp/class/google/protobuf/well-known-types/enum-value-options) | | | - - - - ---- -### ServiceDescriptorProto - - -Describes a service. 
- -```yaml -"name": string -"method": []google.protobuf.MethodDescriptorProto -"options": .google.protobuf.ServiceOptions - -``` - -| Field | Type | Description | Default | -| ----- | ---- | ----------- |----------- | -| `name` | `string` | | | -| `method` | [[]google.protobuf.MethodDescriptorProto](descriptor.proto.sk.md#MethodDescriptorProto) | | | -| `options` | [.google.protobuf.ServiceOptions](https://developers.google.com/protocol-buffers/docs/reference/csharp/class/google/protobuf/well-known-types/service-options) | | | - - - - ---- -### MethodDescriptorProto - - -Describes a method of a service. - -```yaml -"name": string -"inputType": string -"outputType": string -"options": .google.protobuf.MethodOptions -"clientStreaming": bool -"serverStreaming": bool - -``` - -| Field | Type | Description | Default | -| ----- | ---- | ----------- |----------- | -| `name` | `string` | | | -| `inputType` | `string` | Input and output type names. These are resolved in the same way as FieldDescriptorProto.type_name, but must refer to a message type. 
| | -| `outputType` | `string` | | | -| `options` | [.google.protobuf.MethodOptions](https://developers.google.com/protocol-buffers/docs/reference/csharp/class/google/protobuf/well-known-types/method-options) | | | -| `clientStreaming` | `bool` | Identifies if client streams multiple client messages | Default: false | -| `serverStreaming` | `bool` | Identifies if server streams multiple server messages | Default: false | - - - - ---- -### FileOptions - - - -```yaml -"javaPackage": string -"javaOuterClassname": string -"javaMultipleFiles": bool -"javaGenerateEqualsAndHash": bool -"javaStringCheckUtf8": bool -"optimizeFor": .google.protobuf.FileOptions.OptimizeMode -"goPackage": string -"ccGenericServices": bool -"javaGenericServices": bool -"pyGenericServices": bool -"deprecated": bool -"ccEnableArenas": bool -"objcClassPrefix": string -"csharpNamespace": string -"swiftPrefix": string -"phpClassPrefix": string -"uninterpretedOption": []google.protobuf.UninterpretedOption - -``` - -| Field | Type | Description | Default | -| ----- | ---- | ----------- |----------- | -| `javaPackage` | `string` | Sets the Java package where classes generated from this .proto will be placed. By default, the proto package is used, but this is often inappropriate because proto packages do not normally start with backwards domain names. | | -| `javaOuterClassname` | `string` | If set, all the classes from the .proto file are wrapped in a single outer class with the given name. This applies to both Proto1 (equivalent to the old "--one_java_file" option) and Proto2 (where a .proto always translates to a single class, but you may want to explicitly choose the class name). | | -| `javaMultipleFiles` | `bool` | If set true, then the Java code generator will generate a separate .java file for each top-level message, enum, and service defined in the .proto file. Thus, these types will *not* be nested inside the outer class named by java_outer_classname. 
However, the outer class will still be generated to contain the file's getDescriptor() method as well as any top-level extensions defined in the file. | Default: false | -| `javaGenerateEqualsAndHash` | `bool` | This option does nothing. | | -| `javaStringCheckUtf8` | `bool` | If set true, then the Java2 code generator will generate code that throws an exception whenever an attempt is made to assign a non-UTF-8 byte sequence to a string field. Message reflection will do the same. However, an extension field still accepts non-UTF-8 byte sequences. This option has no effect on when used with the lite runtime. | Default: false | -| `optimizeFor` | [.google.protobuf.FileOptions.OptimizeMode](https://developers.google.com/protocol-buffers/docs/reference/csharp/class/google/protobuf/well-known-types/file-options.-optimize-mode) | | Default: SPEED | -| `goPackage` | `string` | Sets the Go package where structs generated from this .proto will be placed. If omitted, the Go package will be derived from the following: - The basename of the package import path, if provided. - Otherwise, the package statement in the .proto file, if present. - Otherwise, the basename of the .proto file, without extension. | | -| `ccGenericServices` | `bool` | Should generic services be generated in each language? "Generic" services are not specific to any particular RPC system. They are generated by the main code generators in each language (without additional plugins). Generic services were the only kind of service generation supported by early versions of google.protobuf. Generic services are now considered deprecated in favor of using plugins that generate code specific to your particular RPC system. Therefore, these default to false. Old code which depends on generic services should explicitly set them to true. | Default: false | -| `javaGenericServices` | `bool` | | Default: false | -| `pyGenericServices` | `bool` | | Default: false | -| `deprecated` | `bool` | Is this file deprecated? 
Depending on the target platform, this can emit Deprecated annotations for everything in the file, or it will be completely ignored; in the very least, this is a formalization for deprecating files. | Default: false | -| `ccEnableArenas` | `bool` | Enables the use of arenas for the proto messages in this file. This applies only to generated classes for C++. | Default: false | -| `objcClassPrefix` | `string` | Sets the objective c class prefix which is prepended to all objective c generated classes from this .proto. There is no default. | | -| `csharpNamespace` | `string` | Namespace for generated classes; defaults to the package. | | -| `swiftPrefix` | `string` | By default Swift generators will take the proto package and CamelCase it replacing '.' with underscore and use that to prefix the types/symbols defined. When this options is provided, they will use this value instead to prefix the types/symbols defined. | | -| `phpClassPrefix` | `string` | Sets the php class prefix which is prepended to all php generated classes from this .proto. Default is empty. | | -| `uninterpretedOption` | [[]google.protobuf.UninterpretedOption](descriptor.proto.sk.md#UninterpretedOption) | The parser stores options it doesn't recognize here. See above. | | - - - - ---- -### OptimizeMode - - -Generated classes can be optimized for speed or code size. - -| Name | Description | -| ----- | ----------- | -| `SPEED` | | -| `CODE_SIZE` | etc. | -| `LITE_RUNTIME` | | - - - - ---- -### MessageOptions - - - -```yaml -"messageSetWireFormat": bool -"noStandardDescriptorAccessor": bool -"deprecated": bool -"mapEntry": bool -"uninterpretedOption": []google.protobuf.UninterpretedOption - -``` - -| Field | Type | Description | Default | -| ----- | ---- | ----------- |----------- | -| `messageSetWireFormat` | `bool` | Set true to use the old proto1 MessageSet wire format for extensions. This is provided for backwards-compatibility with the MessageSet wire format. 
You should not use this for any other reason: It's less efficient, has fewer features, and is more complicated. The message must be defined exactly as follows: message Foo { option message_set_wire_format = true; extensions 4 to max; } Note that the message cannot have any defined fields; MessageSets only have extensions. All extensions of your type must be singular messages; e.g. they cannot be int32s, enums, or repeated messages. Because this is an option, the above two restrictions are not enforced by the protocol compiler. | Default: false | -| `noStandardDescriptorAccessor` | `bool` | Disables the generation of the standard "descriptor()" accessor, which can conflict with a field of the same name. This is meant to make migration from proto1 easier; new code should avoid fields named "descriptor". | Default: false | -| `deprecated` | `bool` | Is this message deprecated? Depending on the target platform, this can emit Deprecated annotations for the message, or it will be completely ignored; in the very least, this is a formalization for deprecating messages. | Default: false | -| `mapEntry` | `bool` | Whether the message is an automatically generated map entry type for the maps field. For maps fields: map map_field = 1; The parsed descriptor looks like: message MapFieldEntry { option map_entry = true; optional KeyType key = 1; optional ValueType value = 2; } repeated MapFieldEntry map_field = 1; Implementations may choose not to generate the map_entry=true message, but use a native map in the target language to hold the keys and values. The reflection APIs in such implementions still need to work as if the field is a repeated message field. NOTE: Do not set the option in .proto files. Always use the maps syntax instead. The option should only be implicitly set by the proto compiler parser. | | -| `uninterpretedOption` | [[]google.protobuf.UninterpretedOption](descriptor.proto.sk.md#UninterpretedOption) | The parser stores options it doesn't recognize here. 
See above. | | - - - - ---- -### FieldOptions - - - -```yaml -"ctype": .google.protobuf.FieldOptions.CType -"packed": bool -"jstype": .google.protobuf.FieldOptions.JSType -"lazy": bool -"deprecated": bool -"weak": bool -"uninterpretedOption": []google.protobuf.UninterpretedOption - -``` - -| Field | Type | Description | Default | -| ----- | ---- | ----------- |----------- | -| `ctype` | [.google.protobuf.FieldOptions.CType](https://developers.google.com/protocol-buffers/docs/reference/csharp/class/google/protobuf/well-known-types/field-options.c-type) | The ctype option instructs the C++ code generator to use a different representation of the field than it normally would. See the specific options below. This option is not yet implemented in the open source release -- sorry, we'll try to include it in a future version! | Default: STRING | -| `packed` | `bool` | The packed option can be enabled for repeated primitive fields to enable a more efficient representation on the wire. Rather than repeatedly writing the tag and type for each element, the entire array is encoded as a single length-delimited blob. In proto3, only explicit setting it to false will avoid using packed encoding. | | -| `jstype` | [.google.protobuf.FieldOptions.JSType](https://developers.google.com/protocol-buffers/docs/reference/csharp/class/google/protobuf/well-known-types/field-options.js-type) | The jstype option determines the JavaScript type used for values of the field. The option is permitted only for 64 bit integral and fixed types (int64, uint64, sint64, fixed64, sfixed64). By default these types are represented as JavaScript strings. This avoids loss of precision that can happen when a large value is converted to a floating point JavaScript numbers. Specifying JS_NUMBER for the jstype causes the generated JavaScript code to use the JavaScript "number" type instead of strings. This option is an enum to permit additional types to be added, e.g. goog.math.Integer. 
| Default: JS_NORMAL | -| `lazy` | `bool` | Should this field be parsed lazily? Lazy applies only to message-type fields. It means that when the outer message is initially parsed, the inner message's contents will not be parsed but instead stored in encoded form. The inner message will actually be parsed when it is first accessed. This is only a hint. Implementations are free to choose whether to use eager or lazy parsing regardless of the value of this option. However, setting this option true suggests that the protocol author believes that using lazy parsing on this field is worth the additional bookkeeping overhead typically needed to implement it. This option does not affect the public interface of any generated code; all method signatures remain the same. Furthermore, thread-safety of the interface is not affected by this option; const methods remain safe to call from multiple threads concurrently, while non-const methods continue to require exclusive access. Note that implementations may choose not to check required fields within a lazy sub-message. That is, calling IsInitialized() on the outer message may return true even if the inner message has missing required fields. This is necessary because otherwise the inner message would have to be parsed in order to perform the check, defeating the purpose of lazy parsing. An implementation which chooses not to check required fields must be consistent about it. That is, for any particular sub-message, the implementation must either *always* check its required fields, or *never* check its required fields, regardless of whether or not the message has been parsed. | Default: false | -| `deprecated` | `bool` | Is this field deprecated? Depending on the target platform, this can emit Deprecated annotations for accessors, or it will be completely ignored; in the very least, this is a formalization for deprecating fields. | Default: false | -| `weak` | `bool` | For Google-internal migration only. Do not use. 
| Default: false | -| `uninterpretedOption` | [[]google.protobuf.UninterpretedOption](descriptor.proto.sk.md#UninterpretedOption) | The parser stores options it doesn't recognize here. See above. | | - - - - ---- -### CType - - - -| Name | Description | -| ----- | ----------- | -| `STRING` | Default mode. | -| `CORD` | | -| `STRING_PIECE` | | - - - - ---- -### JSType - - - -| Name | Description | -| ----- | ----------- | -| `JS_NORMAL` | Use the default type. | -| `JS_STRING` | Use JavaScript strings. | -| `JS_NUMBER` | Use JavaScript numbers. | - - - - ---- -### OneofOptions - - - -```yaml -"uninterpretedOption": []google.protobuf.UninterpretedOption - -``` - -| Field | Type | Description | Default | -| ----- | ---- | ----------- |----------- | -| `uninterpretedOption` | [[]google.protobuf.UninterpretedOption](descriptor.proto.sk.md#UninterpretedOption) | The parser stores options it doesn't recognize here. See above. | | - - - - ---- -### EnumOptions - - - -```yaml -"allowAlias": bool -"deprecated": bool -"uninterpretedOption": []google.protobuf.UninterpretedOption - -``` - -| Field | Type | Description | Default | -| ----- | ---- | ----------- |----------- | -| `allowAlias` | `bool` | Set this option to true to allow mapping different tag names to the same value. | | -| `deprecated` | `bool` | Is this enum deprecated? Depending on the target platform, this can emit Deprecated annotations for the enum, or it will be completely ignored; in the very least, this is a formalization for deprecating enums. | Default: false | -| `uninterpretedOption` | [[]google.protobuf.UninterpretedOption](descriptor.proto.sk.md#UninterpretedOption) | The parser stores options it doesn't recognize here. See above. 
| | - - - - ---- -### EnumValueOptions - - - -```yaml -"deprecated": bool -"uninterpretedOption": []google.protobuf.UninterpretedOption - -``` - -| Field | Type | Description | Default | -| ----- | ---- | ----------- |----------- | -| `deprecated` | `bool` | Is this enum value deprecated? Depending on the target platform, this can emit Deprecated annotations for the enum value, or it will be completely ignored; in the very least, this is a formalization for deprecating enum values. | Default: false | -| `uninterpretedOption` | [[]google.protobuf.UninterpretedOption](descriptor.proto.sk.md#UninterpretedOption) | The parser stores options it doesn't recognize here. See above. | | - - - - ---- -### ServiceOptions - - - -```yaml -"deprecated": bool -"uninterpretedOption": []google.protobuf.UninterpretedOption - -``` - -| Field | Type | Description | Default | -| ----- | ---- | ----------- |----------- | -| `deprecated` | `bool` | Is this service deprecated? Depending on the target platform, this can emit Deprecated annotations for the service, or it will be completely ignored; in the very least, this is a formalization for deprecating services. | Default: false | -| `uninterpretedOption` | [[]google.protobuf.UninterpretedOption](descriptor.proto.sk.md#UninterpretedOption) | The parser stores options it doesn't recognize here. See above. | | - - - - ---- -### MethodOptions - - - -```yaml -"deprecated": bool -"idempotencyLevel": .google.protobuf.MethodOptions.IdempotencyLevel -"uninterpretedOption": []google.protobuf.UninterpretedOption - -``` - -| Field | Type | Description | Default | -| ----- | ---- | ----------- |----------- | -| `deprecated` | `bool` | Is this method deprecated? Depending on the target platform, this can emit Deprecated annotations for the method, or it will be completely ignored; in the very least, this is a formalization for deprecating methods. 
| Default: false | -| `idempotencyLevel` | [.google.protobuf.MethodOptions.IdempotencyLevel](https://developers.google.com/protocol-buffers/docs/reference/csharp/class/google/protobuf/well-known-types/method-options.-idempotency-level) | | Default: IDEMPOTENCY_UNKNOWN | -| `uninterpretedOption` | [[]google.protobuf.UninterpretedOption](descriptor.proto.sk.md#UninterpretedOption) | The parser stores options it doesn't recognize here. See above. | | - - - - ---- -### IdempotencyLevel - - -Is this method side-effect-free (or safe in HTTP parlance), or idempotent, -or neither? HTTP based RPC implementation may choose GET verb for safe -methods, and PUT verb for idempotent methods instead of the default POST. - -| Name | Description | -| ----- | ----------- | -| `IDEMPOTENCY_UNKNOWN` | | -| `NO_SIDE_EFFECTS` | | -| `IDEMPOTENT` | | - - - - ---- -### UninterpretedOption - - -A message representing a option the parser does not recognize. This only -appears in options protos created by the compiler::Parser class. -DescriptorPool resolves these when building Descriptor objects. Therefore, -options protos in descriptor objects (e.g. returned by Descriptor::options(), -or produced by Descriptor::CopyTo()) will never have UninterpretedOptions -in them. - -```yaml -"name": []google.protobuf.UninterpretedOption.NamePart -"identifierValue": string -"positiveIntValue": int -"negativeIntValue": int -"doubleValue": float -"stringValue": bytes -"aggregateValue": string - -``` - -| Field | Type | Description | Default | -| ----- | ---- | ----------- |----------- | -| `name` | [[]google.protobuf.UninterpretedOption.NamePart](descriptor.proto.sk.md#NamePart) | | | -| `identifierValue` | `string` | The value of the uninterpreted option, in whatever type the tokenizer identified it as during parsing. Exactly one of these should be set. 
| | -| `positiveIntValue` | `int` | | | -| `negativeIntValue` | `int` | | | -| `doubleValue` | `float` | | | -| `stringValue` | `bytes` | | | -| `aggregateValue` | `string` | | | - - - - ---- -### NamePart - - -The name of the uninterpreted option. Each string represents a segment in -a dot-separated name. is_extension is true iff a segment represents an -extension (denoted with parentheses in options specs in .proto files). -E.g.,{ ["foo", false], ["bar.baz", true], ["qux", false] } represents -"foo.(bar.baz).qux". - -```yaml -"namePart": string -"isExtension": bool - -``` - -| Field | Type | Description | Default | -| ----- | ---- | ----------- |----------- | -| `namePart` | `string` | | | -| `isExtension` | `bool` | | | - - - - ---- -### SourceCodeInfo - - -Encapsulates information about the original source file from which a -FileDescriptorProto was generated. - -```yaml -"location": []google.protobuf.SourceCodeInfo.Location - -``` - -| Field | Type | Description | Default | -| ----- | ---- | ----------- |----------- | -| `location` | [[]google.protobuf.SourceCodeInfo.Location](descriptor.proto.sk.md#Location) | A Location identifies a piece of source code in a .proto file which corresponds to a particular definition. This information is intended to be useful to IDEs, code indexers, documentation generators, and similar tools. For example, say we have a file like: message Foo { optional string foo = 1; } Let's look at just the field definition: optional string foo = 1; ^ ^^ ^^ ^ ^^^ a bc de f ghi We have the following locations: span path represents [a,i) [ 4, 0, 2, 0 ] The whole field definition. [a,b) [ 4, 0, 2, 0, 4 ] The label (optional). [c,d) [ 4, 0, 2, 0, 5 ] The type (string). [e,f) [ 4, 0, 2, 0, 1 ] The name (foo). [g,h) [ 4, 0, 2, 0, 3 ] The number (1). Notes: - A location may refer to a repeated field itself (i.e. not to any particular index within it). This is used whenever a set of elements are logically enclosed in a single code segment. 
For example, an entire extend block (possibly containing multiple extension definitions) will have an outer location whose path refers to the "extensions" repeated field without an index. - Multiple locations may have the same path. This happens when a single logical declaration is spread out across multiple places. The most obvious example is the "extend" block again -- there may be multiple extend blocks in the same scope, each of which will have the same path. - A location's span is not always a subset of its parent's span. For example, the "extendee" of an extension declaration appears at the beginning of the "extend" block and is shared by all extensions within the block. - Just because a location's span is a subset of some other location's span does not mean that it is a descendent. For example, a "group" defines both a type and a field in a single declaration. Thus, the locations corresponding to the type and field and their components will overlap. - Code which tries to interpret locations should probably be designed to ignore those that it doesn't understand, as more types of locations could be recorded in the future. | | - - - - ---- -### Location - - - -```yaml -"path": []int -"span": []int -"leadingComments": string -"trailingComments": string -"leadingDetachedComments": []string - -``` - -| Field | Type | Description | Default | -| ----- | ---- | ----------- |----------- | -| `path` | `[]int` | Identifies which part of the FileDescriptorProto was defined at this location. Each element is a field number or an index. They form a path from the root FileDescriptorProto to the place where the definition. 
For example, this path: [ 4, 3, 2, 7, 1 ] refers to: file.message_type(3) // 4, 3 .field(7) // 2, 7 .name() // 1 This is because FileDescriptorProto.message_type has field number 4: repeated DescriptorProto message_type = 4; and DescriptorProto.field has field number 2: repeated FieldDescriptorProto field = 2; and FieldDescriptorProto.name has field number 1: optional string name = 1; Thus, the above path gives the location of a field name. If we removed the last element: [ 4, 3, 2, 7 ] this path refers to the whole field declaration (from the beginning of the label to the terminating semicolon). | | -| `span` | `[]int` | Always has exactly three or four elements: start line, start column, end line (optional, otherwise assumed same as start line), end column. These are packed into a single field for efficiency. Note that line and column numbers are zero-based -- typically you will want to add 1 to each before displaying to a user. | | -| `leadingComments` | `string` | If this SourceCodeInfo represents a complete declaration, these are any comments appearing before and after the declaration which appear to be attached to the declaration. A series of line comments appearing on consecutive lines, with no other tokens appearing on those lines, will be treated as a single comment. leading_detached_comments will keep paragraphs of comments that appear before (but not connected to) the current element. Each paragraph, separated by empty lines, will be one comment element in the repeated field. Only the comment content is provided; comment markers (e.g. //) are stripped out. For block comments, leading whitespace and an asterisk will be stripped from the beginning of each line other than the first. Newlines are included in the output. Examples: optional int32 foo = 1; // Comment attached to foo. // Comment attached to bar. optional int32 bar = 2; optional string baz = 3; // Comment attached to baz. // Another line attached to baz. // Comment attached to qux. 
// // Another line attached to qux. optional double qux = 4; // Detached comment for corge. This is not leading or trailing comments // to qux or corge because there are blank lines separating it from // both. // Detached comment for corge paragraph 2. optional string corge = 5; /* Block comment attached * to corge. Leading asterisks * will be removed. */ /* Block comment attached to * grault. */ optional int32 grault = 6; // ignored detached comments. | | -| `trailingComments` | `string` | | | -| `leadingDetachedComments` | `[]string` | | | - - - - ---- -### GeneratedCodeInfo - - -Describes the relationship between generated code and its original source -file. A GeneratedCodeInfo message is associated with only one generated -source file, but may contain references to different source .proto files. - -```yaml -"annotation": []google.protobuf.GeneratedCodeInfo.Annotation - -``` - -| Field | Type | Description | Default | -| ----- | ---- | ----------- |----------- | -| `annotation` | [[]google.protobuf.GeneratedCodeInfo.Annotation](descriptor.proto.sk.md#Annotation) | An Annotation connects some span of text in generated code to an element of its generating .proto file. | | - - - - ---- -### Annotation - - - -```yaml -"path": []int -"sourceFile": string -"begin": int -"end": int - -``` - -| Field | Type | Description | Default | -| ----- | ---- | ----------- |----------- | -| `path` | `[]int` | Identifies the element in the original source .proto file. This field is formatted the same as SourceCodeInfo.Location.path. | | -| `sourceFile` | `string` | Identifies the filesystem path to the original source .proto. | | -| `begin` | `int` | Identifies the starting offset in bytes in the generated code that relates to the identified object. | | -| `end` | `int` | Identifies the ending offset in bytes in the generated code that relates to the identified offset. The end offset should be one past the last relevant byte (so the length of the text = end - begin). 
| | - - - - - - - - From dab9a825b4fc1eb1d784f557f9f10b70b550a7b5 Mon Sep 17 00:00:00 2001 From: Joe Kelley Date: Tue, 16 Jul 2019 12:28:12 -0400 Subject: [PATCH 05/17] most initial comments --- pkg/code-generator/cmd/main.go | 5 +---- .../codegen/templates/converter_template.go | 7 +++---- .../codegen/templates/converter_test_template.go | 4 +++- pkg/code-generator/codegen/templates/errors/errors.go | 7 +++++++ pkg/code-generator/model/project.go | 3 +-- pkg/code-generator/parser/parser_resource.go | 5 +---- test/mocks/conversion/resource_converter.sk.go | 11 +++++------ test/mocks/conversion/resource_converter_test.go | 2 ++ 8 files changed, 23 insertions(+), 21 deletions(-) create mode 100644 pkg/code-generator/codegen/templates/errors/errors.go diff --git a/pkg/code-generator/cmd/main.go b/pkg/code-generator/cmd/main.go index 94171b38c..bd430c40a 100644 --- a/pkg/code-generator/cmd/main.go +++ b/pkg/code-generator/cmd/main.go @@ -167,7 +167,6 @@ func Generate(opts GenerateOptions) error { return err } - genDocs = &DocsOptions{} if skp.ApiGroup.DocsDir != "" && (genDocs != nil) { docs, err := docgen.GenerateFiles(version, genDocs) if err != nil { @@ -226,9 +225,7 @@ func Generate(opts GenerateOptions) error { } if skp.ApiGroup.ConversionGoPackage != "" { - goPackageSegments := strings.Split(skp.ApiGroup.ConversionGoPackage, "/") - skp.ApiGroup.ConversionGoPackageShort = goPackageSegments[len(goPackageSegments)-1] - + skp.ApiGroup.ConversionGoPackageShort = filepath.Base(skp.ApiGroup.ConversionGoPackage) code, err := codegen.GenerateConversionFiles(skp.ApiGroup, apiGroupVersions) if err != nil { return err diff --git a/pkg/code-generator/codegen/templates/converter_template.go b/pkg/code-generator/codegen/templates/converter_template.go index 943ca2cea..546e26587 100644 --- a/pkg/code-generator/codegen/templates/converter_template.go +++ b/pkg/code-generator/codegen/templates/converter_template.go @@ -7,10 +7,9 @@ import ( var ConverterTemplate = 
template.Must(template.New("converter").Funcs(Funcs).Parse(`package {{ .ConversionGoPackageShort }} import ( - "errors" - "github.com/solo-io/go-utils/versionutils/kubeapi" "github.com/solo-io/solo-kit/pkg/api/v1/clients/kube/crd" + "github.com/solo-io/solo-kit/pkg/code-generator/codegen/templates/errors" {{- range .Conversions }} {{- range .Projects }} @@ -81,7 +80,7 @@ func (c *{{ lower_camel $resourceName }}Converter) convertUp(src, dst crd.SoloKi {{- end }} {{- end }} } - return errors.New("unrecognized source type, this should never happen") + return errors.UnrecognizedSourceType } func (c *{{ lower_camel $resourceName }}Converter) convertDown(src, dst crd.SoloKitCrd) error { @@ -97,7 +96,7 @@ func (c *{{ lower_camel $resourceName }}Converter) convertDown(src, dst crd.Solo {{- end }} {{- end }} } - return errors.New("unrecognized source type, this should never happen") + return errors.UnrecognizedSourceType } {{- end }} diff --git a/pkg/code-generator/codegen/templates/converter_test_template.go b/pkg/code-generator/codegen/templates/converter_test_template.go index b2f93cf79..04a4a385c 100644 --- a/pkg/code-generator/codegen/templates/converter_test_template.go +++ b/pkg/code-generator/codegen/templates/converter_test_template.go @@ -4,7 +4,9 @@ import ( "text/template" ) -var ConverterTestTemplate = template.Must(template.New("converter_test").Funcs(Funcs).Parse(`package {{ .ConversionGoPackageShort }}_test +var ConverterTestTemplate = template.Must(template.New("converter_test").Funcs(Funcs).Parse(`// +build solokit + +package {{ .ConversionGoPackageShort }}_test {{ $short_package := .ConversionGoPackageShort }} diff --git a/pkg/code-generator/codegen/templates/errors/errors.go b/pkg/code-generator/codegen/templates/errors/errors.go new file mode 100644 index 000000000..94cd7a783 --- /dev/null +++ b/pkg/code-generator/codegen/templates/errors/errors.go @@ -0,0 +1,7 @@ +package errors + +import "errors" + +var ( + UnrecognizedSourceType = 
errors.New("unrecognized source type; this should never happen") +) diff --git a/pkg/code-generator/model/project.go b/pkg/code-generator/model/project.go index 9f22a59d4..9ade419b8 100644 --- a/pkg/code-generator/model/project.go +++ b/pkg/code-generator/model/project.go @@ -189,8 +189,7 @@ func LoadProjectConfig(path string) (SoloKitProject, error) { } skp.ProjectFile = path - goPackageSegments := strings.Split(skp.ApiGroup.ResourceGroupGoPackage, "/") - skp.ApiGroup.ResourceGroupGoPackageShort = goPackageSegments[len(goPackageSegments)-1] + skp.ApiGroup.ResourceGroupGoPackageShort = filepath.Base(skp.ApiGroup.ResourceGroupGoPackage) for _, vc := range skp.ApiGroup.VersionConfigs { if vc.GoPackage == "" { goPkg, err := detectGoPackageForVersion(filepath.Dir(skp.ProjectFile) + "/" + vc.Version) diff --git a/pkg/code-generator/parser/parser_resource.go b/pkg/code-generator/parser/parser_resource.go index 15e5bfa68..604da5c13 100644 --- a/pkg/code-generator/parser/parser_resource.go +++ b/pkg/code-generator/parser/parser_resource.go @@ -46,10 +46,7 @@ func getResource(resources []*model.Resource, cfg model.ResourceConfig) (*model. 
possibleResources = append(possibleResources, res) } } - switch len(possibleResources) { - case 1: - return possibleResources[0], nil - case 0: + if len(possibleResources) == 0 { return nil, errors.Errorf("getting resource: message %v not found", cfg) } diff --git a/test/mocks/conversion/resource_converter.sk.go b/test/mocks/conversion/resource_converter.sk.go index 1742faff5..790c74e57 100644 --- a/test/mocks/conversion/resource_converter.sk.go +++ b/test/mocks/conversion/resource_converter.sk.go @@ -3,10 +3,9 @@ package conversion import ( - "errors" - "github.com/solo-io/go-utils/versionutils/kubeapi" "github.com/solo-io/solo-kit/pkg/api/v1/clients/kube/crd" + "github.com/solo-io/solo-kit/pkg/code-generator/codegen/templates/errors" v1 "github.com/solo-io/solo-kit/test/mocks/v1" v1alpha1 "github.com/solo-io/solo-kit/test/mocks/v1alpha1" v2alpha1 "github.com/solo-io/solo-kit/test/mocks/v2alpha1" @@ -59,7 +58,7 @@ func (c *fakeResourceConverter) convertUp(src, dst crd.SoloKitCrd) error { case *v1alpha1.FakeResource: return c.convertUp(c.upConverter.FromV1Alpha1ToV1(t), dst) } - return errors.New("unrecognized source type, this should never happen") + return errors.UnrecognizedSourceType } func (c *fakeResourceConverter) convertDown(src, dst crd.SoloKitCrd) error { @@ -71,7 +70,7 @@ func (c *fakeResourceConverter) convertDown(src, dst crd.SoloKitCrd) error { case *v1.FakeResource: return c.convertDown(c.downConverter.FromV1ToV1Alpha1(t), dst) } - return errors.New("unrecognized source type, this should never happen") + return errors.UnrecognizedSourceType } type MockResourceUpConverter interface { @@ -125,7 +124,7 @@ func (c *mockResourceConverter) convertUp(src, dst crd.SoloKitCrd) error { case *v1.MockResource: return c.convertUp(c.upConverter.FromV1ToV2Alpha1(t), dst) } - return errors.New("unrecognized source type, this should never happen") + return errors.UnrecognizedSourceType } func (c *mockResourceConverter) convertDown(src, dst crd.SoloKitCrd) error { @@ 
-139,5 +138,5 @@ func (c *mockResourceConverter) convertDown(src, dst crd.SoloKitCrd) error { case *v2alpha1.MockResource: return c.convertDown(c.downConverter.FromV2Alpha1ToV1(t), dst) } - return errors.New("unrecognized source type, this should never happen") + return errors.UnrecognizedSourceType } diff --git a/test/mocks/conversion/resource_converter_test.go b/test/mocks/conversion/resource_converter_test.go index b73e96453..9656dbdfe 100644 --- a/test/mocks/conversion/resource_converter_test.go +++ b/test/mocks/conversion/resource_converter_test.go @@ -1,5 +1,7 @@ // Code generated by solo-kit. DO NOT EDIT. +// +build solokit + package conversion_test import ( From 60b95e8ceda76f10d7a1f1040a0b27827b964aad Mon Sep 17 00:00:00 2001 From: Joe Kelley Date: Tue, 16 Jul 2019 13:14:21 -0400 Subject: [PATCH 06/17] big rename --- pkg/code-generator/codegen/conversion.go | 28 +++++----- pkg/code-generator/codegen/project.go | 28 +++++----- pkg/code-generator/codegen/resource_group.go | 1 - .../docgen/funcs/template_funcs.go | 56 +++++++++---------- pkg/code-generator/docgen/generator.go | 10 ++-- .../templates/markdown/project_template.go | 4 +- .../templates/markdown/proto_file_template.go | 4 +- .../restructured/project_template.go | 4 +- .../restructured/proto_file_template.go | 4 +- pkg/code-generator/parser/parser_resource.go | 2 +- pkg/code-generator/parser/parser_xds.go | 12 ++-- 11 files changed, 76 insertions(+), 77 deletions(-) diff --git a/pkg/code-generator/codegen/conversion.go b/pkg/code-generator/codegen/conversion.go index 2041a4672..37ed4d89c 100644 --- a/pkg/code-generator/codegen/conversion.go +++ b/pkg/code-generator/codegen/conversion.go @@ -14,46 +14,46 @@ import ( "github.com/solo-io/solo-kit/pkg/code-generator/model" ) -func GenerateConversionFiles(soloKitProject *model.ApiGroup, projects []*model.Version) (code_generator.Files, error) { +func GenerateConversionFiles(soloKitProject *model.ApiGroup, versions []*model.Version) 
(code_generator.Files, error) { var files code_generator.Files - sort.SliceStable(projects, func(i, j int) bool { - vi, err := kubeapi.ParseVersion(projects[i].VersionConfig.Version) + sort.SliceStable(versions, func(i, j int) bool { + vi, err := kubeapi.ParseVersion(versions[i].VersionConfig.Version) if err != nil { return false } - vj, err := kubeapi.ParseVersion(projects[j].VersionConfig.Version) + vj, err := kubeapi.ParseVersion(versions[j].VersionConfig.Version) if err != nil { return false } return vi.LessThan(vj) }) - resourceNameToProjects := make(map[string][]*model.Version) + resourceNameToVersions := make(map[string][]*model.Version) - for index, project := range projects { - for _, res := range project.Resources { + for i, v := range versions { + for _, res := range v.Resources { // only generate files for the resources in our group, otherwise we import - if !project.VersionConfig.IsOurProto(res.Filename) && !res.IsCustom { + if !v.VersionConfig.IsOurProto(res.Filename) && !res.IsCustom { log.Printf("not generating solo-kit "+ "clients for resource %v.%v, "+ - "resource proto package must match project proto package %v", res.ProtoPackage, res.Name, project.ProtoPackage) + "resource proto package must match version proto package %v", res.ProtoPackage, res.Name, v.ProtoPackage) continue } else if res.IsCustom && res.CustomResource.Imported { log.Printf("not generating solo-kit "+ "clients for resource %v.%v, "+ - "custom resources from a different project are not generated", res.GoPackage, res.Name, project.VersionConfig.GoPackage) + "custom resources from a different version are not generated", res.GoPackage, res.Name, v.VersionConfig.GoPackage) continue } - if _, found := resourceNameToProjects[res.Name]; !found { - resourceNameToProjects[res.Name] = make([]*model.Version, 0, len(projects)-index) + if _, found := resourceNameToVersions[res.Name]; !found { + resourceNameToVersions[res.Name] = make([]*model.Version, 0, len(versions)-i) } -
resourceNameToProjects[res.Name] = append(resourceNameToProjects[res.Name], project) + resourceNameToVersions[res.Name] = append(resourceNameToVersions[res.Name], v) } } - soloKitProject.Conversions = getConversionsFromResourceProjects(resourceNameToProjects) + soloKitProject.Conversions = getConversionsFromResourceProjects(resourceNameToVersions) fs, err := generateFilesForConversionConfig(soloKitProject) if err != nil { diff --git a/pkg/code-generator/codegen/project.go b/pkg/code-generator/codegen/project.go index fc2bbb892..a05fe9154 100644 --- a/pkg/code-generator/codegen/project.go +++ b/pkg/code-generator/codegen/project.go @@ -18,23 +18,23 @@ const fileHeader = `// Code generated by solo-kit. DO NOT EDIT. ` -func GenerateProjectFiles(project *model.Version, skipOutOfPackageFiles, skipGeneratedTests bool) (code_generator.Files, error) { - files, err := generateFilesForProject(project) +func GenerateProjectFiles(version *model.Version, skipOutOfPackageFiles, skipGeneratedTests bool) (code_generator.Files, error) { + files, err := generateFilesForVersion(version) if err != nil { return nil, err } - for _, res := range project.Resources { + for _, res := range version.Resources { // only generate files for the resources in our group, otherwise we import - if !project.VersionConfig.IsOurProto(res.Filename) && !res.IsCustom { + if !version.VersionConfig.IsOurProto(res.Filename) && !res.IsCustom { log.Printf("not generating solo-kit "+ "clients for resource %v.%v, "+ - "resource proto package must match project proto package %v", res.ProtoPackage, res.Name, project.ProtoPackage) + "resource proto package must match version proto package %v", res.ProtoPackage, res.Name, version.ProtoPackage) continue } else if res.IsCustom && res.CustomResource.Imported { log.Printf("not generating solo-kit "+ "clients for resource %v.%v, "+ - "custom resources from a different project are not generated", res.GoPackage, res.Name, project.VersionConfig.GoPackage) + "custom resources 
from a different version are not generated", res.GoPackage, res.Name, version.VersionConfig.GoPackage) continue } @@ -45,8 +45,8 @@ func GenerateProjectFiles(project *model.Version, skipOutOfPackageFiles, skipGen files = append(files, fs...) } - for _, res := range project.XDSResources { - if skipOutOfPackageFiles && !project.VersionConfig.IsOurProto(res.Filename) { + for _, res := range version.XDSResources { + if skipOutOfPackageFiles && !version.VersionConfig.IsOurProto(res.Filename) { continue } fs, err := generateFilesForXdsResource(res) @@ -109,17 +109,17 @@ func generateFilesForResource(resource *model.Resource) (code_generator.Files, e return v, nil } -func generateFilesForProject(project *model.Version) (code_generator.Files, error) { +func generateFilesForVersion(version *model.Version) (code_generator.Files, error) { var v code_generator.Files for suffix, tmpl := range map[string]*template.Template{ "_suite_test.go": templates.ProjectTestSuiteTemplate, } { - content, err := generateProjectFile(project, tmpl) + content, err := generateVersionFile(version, tmpl) if err != nil { - return nil, errors.Wrapf(err, "internal error: processing template '%v' for apigroup %v failed", tmpl.ParseName, project.VersionConfig.ApiGroup.Name) + return nil, errors.Wrapf(err, "internal error: processing template '%v' for apigroup %v failed", tmpl.ParseName, version.VersionConfig.ApiGroup.Name) } v = append(v, code_generator.File{ - Filename: strcase.ToSnake(project.VersionConfig.ApiGroup.Name) + suffix, + Filename: strcase.ToSnake(version.VersionConfig.ApiGroup.Name) + suffix, Content: content, }) } @@ -142,9 +142,9 @@ func generateResourceFile(resource *model.Resource, tmpl *template.Template) (st return buf.String(), nil } -func generateProjectFile(project *model.Version, tmpl *template.Template) (string, error) { +func generateVersionFile(version *model.Version, tmpl *template.Template) (string, error) { buf := &bytes.Buffer{} - if err := tmpl.Execute(buf, project); err 
!= nil { + if err := tmpl.Execute(buf, version); err != nil { return "", err } return buf.String(), nil diff --git a/pkg/code-generator/codegen/resource_group.go b/pkg/code-generator/codegen/resource_group.go index 2e0b3f75f..a4ddfe087 100644 --- a/pkg/code-generator/codegen/resource_group.go +++ b/pkg/code-generator/codegen/resource_group.go @@ -17,7 +17,6 @@ func GenerateResourceGroupFiles(apiGroup *model.ApiGroup, skipOutOfPackageFiles, var files code_generator.Files for _, grp := range apiGroup.ResourceGroupsFoo { - // TODO joekelley this check probably doesn't make sense if skipOutOfPackageFiles && !(strings.HasSuffix(grp.Name, "."+apiGroup.Name) || grp.Name == apiGroup.Name) { continue } diff --git a/pkg/code-generator/docgen/funcs/template_funcs.go b/pkg/code-generator/docgen/funcs/template_funcs.go index 8c3839634..71dfd9ef4 100644 --- a/pkg/code-generator/docgen/funcs/template_funcs.go +++ b/pkg/code-generator/docgen/funcs/template_funcs.go @@ -43,7 +43,7 @@ type templateFunctions struct { var magicCommentRegex = regexp.MustCompile("@solo-kit:.*") var githubProjectFileRegex = regexp.MustCompile(".*github.com/([^/]*)/([^/]*)/(.*)") -func TemplateFuncs(project *model.Version, docsOptions *options.DocsOptions) template.FuncMap { +func TemplateFuncs(version *model.Version, docsOptions *options.DocsOptions) template.FuncMap { funcs := &templateFunctions{} funcMap := template.FuncMap{ "join": strings.Join, @@ -54,13 +54,13 @@ func TemplateFuncs(project *model.Version, docsOptions *options.DocsOptions) tem "p": gendoc.PFilter, "para": gendoc.ParaFilter, "nobr": gendoc.NoBrFilter, - "fieldType": fieldType(project), + "fieldType": fieldType(version), "yamlType": yamlType, "noescape": noEscape, - "linkForField": linkForField(project, docsOptions), - "linkForResource": linkForResource(project, docsOptions), - "forEachMessage": funcs.forEachMessage(getMessageSkippingInfo(project)), - "resourceForMessage": resourceForMessage(project), + "linkForField": 
linkForField(version, docsOptions), + "linkForResource": linkForResource(version, docsOptions), + "forEachMessage": funcs.forEachMessage(getMessageSkippingInfo(version)), + "resourceForMessage": resourceForMessage(version), "getFileForMessage": func(msg *protokit.Descriptor) *protokit.FileDescriptor { return msg.GetFile() }, @@ -180,7 +180,7 @@ func noEscape(s string) htmltemplate.HTML { return htmltemplate.HTML(s) } -func fieldType(project *model.Version) func(field *protokit.FieldDescriptor) (string, error) { +func fieldType(version *model.Version) func(field *protokit.FieldDescriptor) (string, error) { return func(field *protokit.FieldDescriptor) (string, error) { fieldTypeStr := func() string { switch field.GetType() { @@ -198,7 +198,7 @@ func fieldType(project *model.Version) func(field *protokit.FieldDescriptor) (st fieldTypeStr = "[]" + strings.TrimPrefix(fieldTypeStr, ".") } if strings.HasSuffix(fieldTypeStr, "Entry") { - _, msg, enum, err := getFileAndTypeDefForField(project, field) + _, msg, enum, err := getFileAndTypeDefForField(version, field) if err != nil { return "", err } @@ -208,11 +208,11 @@ func fieldType(project *model.Version) func(field *protokit.FieldDescriptor) (st if len(msg.Field) != 2 { return "", errors.Errorf("message %v was Entry type, expected map", msg.GetName()) } - key, err := fieldType(project)(&protokit.FieldDescriptor{FieldDescriptorProto: msg.Field[0]}) + key, err := fieldType(version)(&protokit.FieldDescriptor{FieldDescriptorProto: msg.Field[0]}) if err != nil { return "", err } - val, err := fieldType(project)(&protokit.FieldDescriptor{FieldDescriptorProto: msg.Field[1]}) + val, err := fieldType(version)(&protokit.FieldDescriptor{FieldDescriptorProto: msg.Field[1]}) if err != nil { return "", err } @@ -230,16 +230,16 @@ func wellKnownProtoLink(typeName string) string { return wellKnown } -func linkForField(project *model.Version, docsOptions *options.DocsOptions) func(forFile *protokit.FileDescriptor, field 
*protokit.FieldDescriptor) (string, error) { +func linkForField(version *model.Version, docsOptions *options.DocsOptions) func(forFile *protokit.FileDescriptor, field *protokit.FieldDescriptor) (string, error) { return func(forFile *protokit.FileDescriptor, field *protokit.FieldDescriptor) (string, error) { - typeName, err := fieldType(project)(field) + typeName, err := fieldType(version)(field) if err != nil { return "", err } if _, ok := primitiveTypes[field.GetType()]; ok || strings.HasPrefix(typeName, "map<") { return "`" + typeName + "`", nil } - file, msg, enum, err := getFileAndTypeDefForField(project, field) + file, msg, enum, err := getFileAndTypeDefForField(version, field) if err != nil { return "", err } @@ -263,7 +263,7 @@ func linkForField(project *model.Version, docsOptions *options.DocsOptions) func } default: var linkedFile string - for _, toGenerate := range project.Request.FileToGenerate { + for _, toGenerate := range version.Request.FileToGenerate { if strings.HasSuffix(file.GetName(), toGenerate) { linkedFile = toGenerate break @@ -271,7 +271,7 @@ func linkForField(project *model.Version, docsOptions *options.DocsOptions) func } if linkedFile == "" { linkedFile = filepath.Base(file.GetName()) - //return "", errors.Errorf("failed to get generated file path for proto %v in list %v", file.GetName(), project.Request.FileToGenerate) + //return "", errors.Errorf("failed to get generated file path for proto %v in list %v", file.GetName(), version.Request.FileToGenerate) } linkedFile = relativeFilename(forFile.GetName(), linkedFile) @@ -293,8 +293,8 @@ func linkForField(project *model.Version, docsOptions *options.DocsOptions) func } } -func linkForResource(project *model.Version, docsOptions *options.DocsOptions) func(resource *model.Resource) (string, error) { - protoFiles := protokit.ParseCodeGenRequest(project.Request) +func linkForResource(version *model.Version, docsOptions *options.DocsOptions) func(resource *model.Resource) (string, error) { + 
protoFiles := protokit.ParseCodeGenRequest(version.Request) return func(resource *model.Resource) (string, error) { for _, file := range protoFiles { if file.GetName() == resource.Filename { @@ -315,13 +315,13 @@ func linkForResource(project *model.Version, docsOptions *options.DocsOptions) f } } return "", errors.Errorf("internal error: could not find file for resource %v in apigroup %v", - resource.Filename, project.VersionConfig.ApiGroup.Name) + resource.Filename, version.VersionConfig.ApiGroup.Name) } } -func resourceForMessage(project *model.Version) func(msg *protokit.Descriptor) (*model.Resource, error) { +func resourceForMessage(version *model.Version) func(msg *protokit.Descriptor) (*model.Resource, error) { return func(msg *protokit.Descriptor) (*model.Resource, error) { - for _, res := range project.Resources { + for _, res := range version.Resources { if res.SkipDocsGen { continue } @@ -331,7 +331,7 @@ func resourceForMessage(project *model.Version) func(msg *protokit.Descriptor) ( } return nil, nil return nil, errors.Errorf("internal error: could not find file for resource for msg %v in apigroup %v", - msg.GetName(), project.VersionConfig.ApiGroup.Name) + msg.GetName(), version.VersionConfig.ApiGroup.Name) } } @@ -408,14 +408,14 @@ func commonPrefix(sep byte, paths ...string) string { return string(c) } -func getFileForField(project *model.Version, field *protokit.FieldDescriptor) (*descriptor.FileDescriptorProto, error) { +func getFileForField(version *model.Version, field *protokit.FieldDescriptor) (*descriptor.FileDescriptorProto, error) { parts := strings.Split(strings.TrimPrefix(field.GetTypeName(), "."), ".") if strings.HasSuffix(parts[len(parts)-1], "Entry") { parts = parts[:len(parts)-1] } messageName := parts[len(parts)-1] packageName := strings.Join(parts[:len(parts)-1], ".") - for _, protoFile := range project.Request.GetProtoFile() { + for _, protoFile := range version.Request.GetProtoFile() { if protoFile.GetPackage() == packageName { for 
_, msg := range protoFile.GetMessageType() { if messageName == msg.GetName() { @@ -424,7 +424,7 @@ func getFileForField(project *model.Version, field *protokit.FieldDescriptor) (* } } } - for _, protoFile := range project.Request.ProtoFile { + for _, protoFile := range version.Request.ProtoFile { // ilackarms: unlikely event of collision where the package name has the right prefix and a nested message type matches if strings.HasPrefix(packageName, protoFile.GetPackage()) { for _, msg := range protoFile.GetMessageType() { @@ -457,9 +457,9 @@ func splitTypeName(typeName string) (string, []string) { return packageName, parts[indexOfFirstUppercasePart:] } -func getFileAndTypeDefForField(project *model.Version, field *protokit.FieldDescriptor) (*descriptor.FileDescriptorProto, *descriptor.DescriptorProto, *descriptor.EnumDescriptorProto, error) { +func getFileAndTypeDefForField(version *model.Version, field *protokit.FieldDescriptor) (*descriptor.FileDescriptorProto, *descriptor.DescriptorProto, *descriptor.EnumDescriptorProto, error) { packageName, typeNameParts := splitTypeName(field.GetTypeName()) - for _, protoFile := range project.Request.ProtoFile { + for _, protoFile := range version.Request.ProtoFile { if protoFile.GetPackage() == packageName { if len(typeNameParts) == 1 { for _, enum := range protoFile.GetEnumType() { @@ -564,10 +564,10 @@ func (c *templateFunctions) forEachMessage(messagesToSkip map[string]bool) func( // Returns a map indicating which resources should be skipped during doc generation. // The keys are strings in the format .. 
-func getMessageSkippingInfo(project *model.Version) map[string]bool { +func getMessageSkippingInfo(version *model.Version) map[string]bool { // Build map for quick lookup of SkipDocsGen flag toSkip := make(map[string]bool) - for _, resource := range project.Resources { + for _, resource := range version.Resources { if resource.SkipDocsGen { continue } diff --git a/pkg/code-generator/docgen/generator.go b/pkg/code-generator/docgen/generator.go index 412d35818..8e147f930 100644 --- a/pkg/code-generator/docgen/generator.go +++ b/pkg/code-generator/docgen/generator.go @@ -89,8 +89,8 @@ func (d *DocsGen) GenerateFilesForProtoFiles(protoFiles []*protokit.FileDescript return v, nil } -func GenerateFiles(project *model.Version, docsOptions *options.DocsOptions) (code_generator.Files, error) { - protoFiles := protokit.ParseCodeGenRequest(project.Request) +func GenerateFiles(version *model.Version, docsOptions *options.DocsOptions) (code_generator.Files, error) { + protoFiles := protokit.ParseCodeGenRequest(version.Request) if docsOptions == nil { docsOptions = &options.DocsOptions{} } @@ -101,7 +101,7 @@ func GenerateFiles(project *model.Version, docsOptions *options.DocsOptions) (co docGenerator := DocsGen{ DocsOptions: *docsOptions, - Project: project, + Project: version, } files, err := docGenerator.GenerateFilesForProject() @@ -174,9 +174,9 @@ func (d *DocsGen) GenerateFilesForProject() (code_generator.Files, error) { return v, nil } -func generateProjectFile(project *model.Version, tmpl *template.Template) (string, error) { +func generateProjectFile(version *model.Version, tmpl *template.Template) (string, error) { buf := &bytes.Buffer{} - if err := tmpl.Execute(buf, project); err != nil { + if err := tmpl.Execute(buf, version); err != nil { return "", err } return buf.String(), nil diff --git a/pkg/code-generator/docgen/templates/markdown/project_template.go b/pkg/code-generator/docgen/templates/markdown/project_template.go index f76156ae7..f29bae3c5 100644 --- 
a/pkg/code-generator/docgen/templates/markdown/project_template.go +++ b/pkg/code-generator/docgen/templates/markdown/project_template.go @@ -9,7 +9,7 @@ import ( "github.com/solo-io/solo-kit/pkg/code-generator/model" ) -func ProjectDocsRootTemplate(project *model.Version, docsOptions *options.DocsOptions) *template.Template { +func ProjectDocsRootTemplate(version *model.Version, docsOptions *options.DocsOptions) *template.Template { str := ` ### API Reference for {{ .VersionConfig.ApiGroup.SoloKitProject.Title}} @@ -29,5 +29,5 @@ API Version: ` + "`{{ .VersionConfig.ApiGroup.Name }}.{{ .VersionConfig.Version ` - return template.Must(template.New("markdown_project").Funcs(funcs.TemplateFuncs(project, docsOptions)).Parse(str)) + return template.Must(template.New("markdown_project").Funcs(funcs.TemplateFuncs(version, docsOptions)).Parse(str)) } diff --git a/pkg/code-generator/docgen/templates/markdown/proto_file_template.go b/pkg/code-generator/docgen/templates/markdown/proto_file_template.go index 3624e92be..e22e6fdb4 100644 --- a/pkg/code-generator/docgen/templates/markdown/proto_file_template.go +++ b/pkg/code-generator/docgen/templates/markdown/proto_file_template.go @@ -8,7 +8,7 @@ import ( "github.com/solo-io/solo-kit/pkg/code-generator/model" ) -func ProtoFileTemplate(project *model.Version, docsOptions *options.DocsOptions) *template.Template { +func ProtoFileTemplate(version *model.Version, docsOptions *options.DocsOptions) *template.Template { str := ` {{ $File := . 
-}} @@ -107,5 +107,5 @@ Description: {{ remove_magic_comments .Comments.Leading }} ` - return template.Must(template.New("p").Funcs(funcs.TemplateFuncs(project, docsOptions)).Parse(str)) + return template.Must(template.New("p").Funcs(funcs.TemplateFuncs(version, docsOptions)).Parse(str)) } diff --git a/pkg/code-generator/docgen/templates/restructured/project_template.go b/pkg/code-generator/docgen/templates/restructured/project_template.go index 2e9397827..6c88e117b 100644 --- a/pkg/code-generator/docgen/templates/restructured/project_template.go +++ b/pkg/code-generator/docgen/templates/restructured/project_template.go @@ -9,8 +9,8 @@ import ( "github.com/solo-io/solo-kit/pkg/code-generator/model" ) -func ProjectDocsRootTemplate(project *model.Version, docsOptions *options.DocsOptions) *template.Template { - return template.Must(template.New("restructured_project").Funcs(funcs.TemplateFuncs(project, docsOptions)).Parse(` +func ProjectDocsRootTemplate(version *model.Version, docsOptions *options.DocsOptions) *template.Template { + return template.Must(template.New("restructured_project").Funcs(funcs.TemplateFuncs(version, docsOptions)).Parse(` =========================================== API Reference for {{ .VersionConfig.ApiGroup.SoloKitProject.Title }} =========================================== diff --git a/pkg/code-generator/docgen/templates/restructured/proto_file_template.go b/pkg/code-generator/docgen/templates/restructured/proto_file_template.go index b7caa9be9..40d2993db 100644 --- a/pkg/code-generator/docgen/templates/restructured/proto_file_template.go +++ b/pkg/code-generator/docgen/templates/restructured/proto_file_template.go @@ -8,7 +8,7 @@ import ( "github.com/solo-io/solo-kit/pkg/code-generator/model" ) -func ProtoFileTemplate(project *model.Version, docsOptions *options.DocsOptions) *template.Template { +func ProtoFileTemplate(version *model.Version, docsOptions *options.DocsOptions) *template.Template { str := ` {{ $File := . 
-}} @@ -130,5 +130,5 @@ Description: {{ remove_magic_comments .Comments.Leading }} ` - return template.Must(template.New("p").Funcs(funcs.TemplateFuncs(project, docsOptions)).Parse(str)) + return template.Must(template.New("p").Funcs(funcs.TemplateFuncs(version, docsOptions)).Parse(str)) } diff --git a/pkg/code-generator/parser/parser_resource.go b/pkg/code-generator/parser/parser_resource.go index 604da5c13..f1661d119 100644 --- a/pkg/code-generator/parser/parser_resource.go +++ b/pkg/code-generator/parser/parser_resource.go @@ -24,7 +24,7 @@ const ( shortNameDeclaration = "@solo-kit:resource.short_name=" // Deprecated, use Message Option (core.solo.io.resource).plural_name pluralNameDeclaration = "@solo-kit:resource.plural_name=" - // Deprecated, use projectConfig.ResourceGroups + // Deprecated, use versionConfig.ResourceGroups resourceGroupsDeclaration = "@solo-kit:resource.resource_groups=" ) diff --git a/pkg/code-generator/parser/parser_xds.go b/pkg/code-generator/parser/parser_xds.go index 883e3dc2a..6783970c5 100644 --- a/pkg/code-generator/parser/parser_xds.go +++ b/pkg/code-generator/parser/parser_xds.go @@ -32,7 +32,7 @@ type xdsMessage struct { fileName string } -func getXdsResources(project *model.Version, messages []ProtoMessageWrapper, services []*protokit.ServiceDescriptor) ([]*model.XDSResource, error) { +func getXdsResources(version *model.Version, messages []ProtoMessageWrapper, services []*protokit.ServiceDescriptor) ([]*model.XDSResource, error) { var msgs []*xdsMessage var svcs []*xdsService @@ -45,7 +45,7 @@ func getXdsResources(project *model.Version, messages []ProtoMessageWrapper, ser // message is not a resource continue } - if msg.protoPackage != project.ProtoPackage { + if msg.protoPackage != version.ProtoPackage { continue } msgs = append(msgs, msg) @@ -59,7 +59,7 @@ func getXdsResources(project *model.Version, messages []ProtoMessageWrapper, ser // message is not a resource continue } - if service.protoPackage != project.ProtoPackage { 
+ if service.protoPackage != version.ProtoPackage { continue } svcs = append(svcs, service) @@ -67,10 +67,10 @@ func getXdsResources(project *model.Version, messages []ProtoMessageWrapper, ser // match time! // for every service, match it with a config message. - return processMessagesAndServices(project, msgs, svcs) + return processMessagesAndServices(version, msgs, svcs) } -func processMessagesAndServices(project *model.Version, msgs []*xdsMessage, svcs []*xdsService) ([]*model.XDSResource, error) { +func processMessagesAndServices(version *model.Version, msgs []*xdsMessage, svcs []*xdsService) ([]*model.XDSResource, error) { var resources []*model.XDSResource for _, svc := range svcs { var message *xdsMessage @@ -92,7 +92,7 @@ func processMessagesAndServices(project *model.Version, msgs []*xdsMessage, svcs NoReferences: message.noReferences, ProtoPackage: message.protoPackage, Filename: message.fileName, - Project: project, + Project: version, }) } From f86659b6a2c983c1bba550c0bf2e09a4cd7402c3 Mon Sep 17 00:00:00 2001 From: Joe Kelley Date: Tue, 16 Jul 2019 14:17:38 -0400 Subject: [PATCH 07/17] rename project fields in model to version --- .../codegen/templates/resource_client_template.go | 2 +- .../templates/resource_client_test_template.go | 2 +- .../templates/resource_reconciler_template.go | 2 +- .../codegen/templates/resource_template.go | 10 +++++----- .../codegen/templates/xds_template.go | 2 +- pkg/code-generator/model/project.go | 8 ++++---- pkg/code-generator/parser/parser_resource.go | 4 ++-- pkg/code-generator/parser/parser_xds.go | 14 +++++++------- 8 files changed, 22 insertions(+), 22 deletions(-) diff --git a/pkg/code-generator/codegen/templates/resource_client_template.go b/pkg/code-generator/codegen/templates/resource_client_template.go index 7b2b4acbf..1c5b2be3f 100644 --- a/pkg/code-generator/codegen/templates/resource_client_template.go +++ b/pkg/code-generator/codegen/templates/resource_client_template.go @@ -4,7 +4,7 @@ import ( 
"text/template" ) -var ResourceClientTemplate = template.Must(template.New("resource_client").Funcs(Funcs).Parse(`package {{ .Project.VersionConfig.Version }} +var ResourceClientTemplate = template.Must(template.New("resource_client").Funcs(Funcs).Parse(`package {{ .ParentVersion.VersionConfig.Version }} import ( "github.com/solo-io/solo-kit/pkg/api/v1/clients" diff --git a/pkg/code-generator/codegen/templates/resource_client_test_template.go b/pkg/code-generator/codegen/templates/resource_client_test_template.go index cf294ef71..cf572190c 100644 --- a/pkg/code-generator/codegen/templates/resource_client_test_template.go +++ b/pkg/code-generator/codegen/templates/resource_client_test_template.go @@ -6,7 +6,7 @@ import ( var ResourceClientTestTemplate = template.Must(template.New("resource_client_test").Funcs(Funcs).Parse(`// +build solokit -package {{ .Project.VersionConfig.Version }} +package {{ .ParentVersion.VersionConfig.Version }} import ( "time" diff --git a/pkg/code-generator/codegen/templates/resource_reconciler_template.go b/pkg/code-generator/codegen/templates/resource_reconciler_template.go index 9f6995a4c..ad5d2257e 100644 --- a/pkg/code-generator/codegen/templates/resource_reconciler_template.go +++ b/pkg/code-generator/codegen/templates/resource_reconciler_template.go @@ -4,7 +4,7 @@ import ( "text/template" ) -var ResourceReconcilerTemplate = template.Must(template.New("resource_reconciler").Funcs(Funcs).Parse(`package {{ .Project.VersionConfig.Version }} +var ResourceReconcilerTemplate = template.Must(template.New("resource_reconciler").Funcs(Funcs).Parse(`package {{ .ParentVersion.VersionConfig.Version }} import ( "github.com/solo-io/solo-kit/pkg/api/v1/clients" "github.com/solo-io/solo-kit/pkg/api/v1/reconcile" diff --git a/pkg/code-generator/codegen/templates/resource_template.go b/pkg/code-generator/codegen/templates/resource_template.go index a276b1c78..aeb2c51ae 100644 --- a/pkg/code-generator/codegen/templates/resource_template.go +++ 
b/pkg/code-generator/codegen/templates/resource_template.go @@ -4,7 +4,7 @@ import ( "text/template" ) -var ResourceTemplate = template.Must(template.New("resource").Funcs(Funcs).Parse(`package {{ .Project.VersionConfig.Version }} +var ResourceTemplate = template.Must(template.New("resource").Funcs(Funcs).Parse(`package {{ .ParentVersion.VersionConfig.Version }} import ( "sort" @@ -195,15 +195,15 @@ func (o *{{ .Name }}) DeepCopyObject() runtime.Object { return resources.Clone(o).(*{{ .Name }}) } -{{- $crdGroupName := .Project.ProtoPackage }} -{{- if ne .Project.VersionConfig.ApiGroup.CrdGroupOverride "" }} -{{- $crdGroupName = .Project.VersionConfig.ApiGroup.CrdGroupOverride }} +{{- $crdGroupName := .ParentVersion.ProtoPackage }} +{{- if ne .ParentVersion.VersionConfig.ApiGroup.CrdGroupOverride "" }} +{{- $crdGroupName = .ParentVersion.VersionConfig.ApiGroup.CrdGroupOverride }} {{- end}} var ( {{ .Name }}GVK = schema.GroupVersionKind{ - Version: "{{ .Project.VersionConfig.Version }}", + Version: "{{ .ParentVersion.VersionConfig.Version }}", Group: "{{ $crdGroupName }}", Kind: "{{ .Name }}", } diff --git a/pkg/code-generator/codegen/templates/xds_template.go b/pkg/code-generator/codegen/templates/xds_template.go index d6c584117..bc6ae089c 100644 --- a/pkg/code-generator/codegen/templates/xds_template.go +++ b/pkg/code-generator/codegen/templates/xds_template.go @@ -4,7 +4,7 @@ import ( "text/template" ) -var XdsTemplate = template.Must(template.New("xds_template").Funcs(Funcs).Parse(`package {{ .Project.VersionConfig.Version }} +var XdsTemplate = template.Must(template.New("xds_template").Funcs(Funcs).Parse(`package {{ .ParentVersion.VersionConfig.Version }} import ( "context" diff --git a/pkg/code-generator/model/project.go b/pkg/code-generator/model/project.go index 9ade419b8..de9ae2941 100644 --- a/pkg/code-generator/model/project.go +++ b/pkg/code-generator/model/project.go @@ -136,7 +136,7 @@ type Resource struct { // resource groups i belong to ResourceGroups 
[]*ResourceGroup // project i belong to - Project *Version + ParentVersion *Version Filename string // the proto file where this resource is contained Version string // set during parsing from this resource's solo-kit.json @@ -171,8 +171,8 @@ type XDSResource struct { NameField string NoReferences bool - Project *Version - ProtoPackage string // eg. gloo.solo.io + ParentVersion *Version + ProtoPackage string // eg. gloo.solo.io Filename string // the proto file where this resource is contained } @@ -241,7 +241,7 @@ func detectGoPackageForVersion(versionDir string) (string, error) { return "", err } if goPkg == "" { - return "", errors.Errorf("no go_package statement found in root dir of project %v", versionDir) + return "", errors.Errorf("no go_package statement found in root dir of version %v", versionDir) } return goPkg, nil } diff --git a/pkg/code-generator/parser/parser_resource.go b/pkg/code-generator/parser/parser_resource.go index f1661d119..95f2b213f 100644 --- a/pkg/code-generator/parser/parser_resource.go +++ b/pkg/code-generator/parser/parser_resource.go @@ -72,7 +72,7 @@ func getResources(version *model.Version, apiGroup *model.ApiGroup, messages []P break } } - resource.Project = version + resource.ParentVersion = version resources = append(resources, resource) } @@ -88,7 +88,7 @@ func getResources(version *model.Version, apiGroup *model.ApiGroup, messages []P ClusterScoped: custom.ClusterScoped, CustomImportPrefix: impPrefix, SkipDocsGen: true, - Project: version, + ParentVersion: version, IsCustom: true, CustomResource: custom, }) diff --git a/pkg/code-generator/parser/parser_xds.go b/pkg/code-generator/parser/parser_xds.go index 6783970c5..a50acd0a3 100644 --- a/pkg/code-generator/parser/parser_xds.go +++ b/pkg/code-generator/parser/parser_xds.go @@ -86,13 +86,13 @@ func processMessagesAndServices(version *model.Version, msgs []*xdsMessage, svcs } resources = append(resources, &model.XDSResource{ - MessageType: message.name, - Name: svc.name, - 
NameField: message.nameField, - NoReferences: message.noReferences, - ProtoPackage: message.protoPackage, - Filename: message.fileName, - Project: version, + MessageType: message.name, + Name: svc.name, + NameField: message.nameField, + NoReferences: message.noReferences, + ProtoPackage: message.protoPackage, + Filename: message.fileName, + ParentVersion: version, }) } From 7b570ae619b627168d0895d2000c3767bf0d0eca Mon Sep 17 00:00:00 2001 From: Joe Kelley Date: Tue, 16 Jul 2019 14:46:08 -0400 Subject: [PATCH 08/17] cl --- changelog/v0.11.0/conversions.yaml | 6 ++++++ 1 file changed, 6 insertions(+) create mode 100644 changelog/v0.11.0/conversions.yaml diff --git a/changelog/v0.11.0/conversions.yaml b/changelog/v0.11.0/conversions.yaml new file mode 100644 index 000000000..8b127ae8f --- /dev/null +++ b/changelog/v0.11.0/conversions.yaml @@ -0,0 +1,6 @@ +changelog: + - type: NEW_FEATURE + description: Generate converters for multi-version resources. + issueLink: https://github.com/solo-io/solo-kit/issues/215 + - type: BREAKING_CHANGE + description: Redefine solo-kit project concept to better support multi-version resources. \ No newline at end of file From 6d8390392005f1e11de6861be7ce73a08fd28c68 Mon Sep 17 00:00:00 2001 From: Joe Kelley Date: Tue, 16 Jul 2019 14:47:13 -0400 Subject: [PATCH 09/17] update cl --- changelog/v0.11.0/conversions.yaml | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/changelog/v0.11.0/conversions.yaml b/changelog/v0.11.0/conversions.yaml index 8b127ae8f..4e9788768 100644 --- a/changelog/v0.11.0/conversions.yaml +++ b/changelog/v0.11.0/conversions.yaml @@ -3,4 +3,5 @@ changelog: description: Generate converters for multi-version resources. issueLink: https://github.com/solo-io/solo-kit/issues/215 - type: BREAKING_CHANGE - description: Redefine solo-kit project concept to better support multi-version resources. 
\ No newline at end of file + description: Redefine solo-kit project concept to better support multi-version resources. + issueLink: https://github.com/solo-io/solo-kit/issues/215 \ No newline at end of file From f2e706d9504004785b80b0e664d96f94a2f75c68 Mon Sep 17 00:00:00 2001 From: Joe Kelley Date: Tue, 16 Jul 2019 15:40:12 -0400 Subject: [PATCH 10/17] dep and imports --- Gopkg.lock | 12 ------------ test/mocks/v1/mock_resources.pb.go | 3 ++- test/mocks/v1/more_mock_resources.pb.go | 3 ++- test/mocks/v1alpha1/mock_resources.pb.go | 3 ++- test/mocks/v2alpha1/mock_resources.pb.go | 3 ++- 5 files changed, 8 insertions(+), 16 deletions(-) diff --git a/Gopkg.lock b/Gopkg.lock index fa6d17ad6..8601cf7eb 100644 --- a/Gopkg.lock +++ b/Gopkg.lock @@ -840,15 +840,6 @@ revision = "168a6198bcb0ef175f7dacec0b8691fc141dc9b8" version = "v1.13.0" -[[projects]] - digest = "1:abeb38ade3f32a92943e5be54f55ed6d6e3b6602761d74b4aab4c9dd45c18abd" - name = "gopkg.in/fsnotify.v1" - packages = ["."] - pruneopts = "UT" - revision = "c2828203cd70a50dcccfb2761f8b1f8ceef9a8e9" - source = "https://github.com/fsnotify/fsnotify.git" - version = "v1.4.7" - [[projects]] digest = "1:abeb38ade3f32a92943e5be54f55ed6d6e3b6602761d74b4aab4c9dd45c18abd" name = "gopkg.in/fsnotify/fsnotify.v1" @@ -1291,7 +1282,6 @@ "google.golang.org/grpc/codes", "google.golang.org/grpc/metadata", "google.golang.org/grpc/status", - "gopkg.in/fsnotify.v1", "k8s.io/api/apps/v1", "k8s.io/api/core/v1", "k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/v1beta1", @@ -1299,9 +1289,7 @@ "k8s.io/apiextensions-apiserver/pkg/client/informers/externalversions", "k8s.io/apiextensions-apiserver/pkg/client/listers/apiextensions/v1beta1", "k8s.io/apimachinery/pkg/api/errors", - "k8s.io/apimachinery/pkg/api/meta", "k8s.io/apimachinery/pkg/apis/meta/v1", - "k8s.io/apimachinery/pkg/conversion", "k8s.io/apimachinery/pkg/labels", "k8s.io/apimachinery/pkg/runtime", "k8s.io/apimachinery/pkg/runtime/schema", diff --git 
a/test/mocks/v1/mock_resources.pb.go b/test/mocks/v1/mock_resources.pb.go index bef8472a9..b2ac8cf7e 100644 --- a/test/mocks/v1/mock_resources.pb.go +++ b/test/mocks/v1/mock_resources.pb.go @@ -11,6 +11,8 @@ import ( bytes "bytes" context "context" fmt "fmt" + math "math" + v2 "github.com/envoyproxy/go-control-plane/envoy/api/v2" _ "github.com/gogo/protobuf/gogoproto" proto "github.com/gogo/protobuf/proto" @@ -19,7 +21,6 @@ import ( grpc "google.golang.org/grpc" codes "google.golang.org/grpc/codes" status "google.golang.org/grpc/status" - math "math" ) // Reference imports to suppress errors if they are not otherwise used. diff --git a/test/mocks/v1/more_mock_resources.pb.go b/test/mocks/v1/more_mock_resources.pb.go index 70101aa4f..f598a1cec 100644 --- a/test/mocks/v1/more_mock_resources.pb.go +++ b/test/mocks/v1/more_mock_resources.pb.go @@ -6,10 +6,11 @@ package v1 import ( bytes "bytes" fmt "fmt" + math "math" + _ "github.com/gogo/protobuf/gogoproto" proto "github.com/gogo/protobuf/proto" core "github.com/solo-io/solo-kit/pkg/api/v1/resources/core" - math "math" ) // Reference imports to suppress errors if they are not otherwise used. diff --git a/test/mocks/v1alpha1/mock_resources.pb.go b/test/mocks/v1alpha1/mock_resources.pb.go index 404fe856e..621c296aa 100644 --- a/test/mocks/v1alpha1/mock_resources.pb.go +++ b/test/mocks/v1alpha1/mock_resources.pb.go @@ -10,12 +10,13 @@ package v1alpha1 import ( bytes "bytes" fmt "fmt" + math "math" + _ "github.com/envoyproxy/go-control-plane/envoy/api/v2" _ "github.com/gogo/protobuf/gogoproto" proto "github.com/gogo/protobuf/proto" core "github.com/solo-io/solo-kit/pkg/api/v1/resources/core" _ "google.golang.org/genproto/googleapis/api/annotations" - math "math" ) // Reference imports to suppress errors if they are not otherwise used. 
diff --git a/test/mocks/v2alpha1/mock_resources.pb.go b/test/mocks/v2alpha1/mock_resources.pb.go index 7c48754fa..144b025db 100644 --- a/test/mocks/v2alpha1/mock_resources.pb.go +++ b/test/mocks/v2alpha1/mock_resources.pb.go @@ -6,10 +6,11 @@ package v2alpha1 import ( bytes "bytes" fmt "fmt" + math "math" + _ "github.com/gogo/protobuf/gogoproto" proto "github.com/gogo/protobuf/proto" core "github.com/solo-io/solo-kit/pkg/api/v1/resources/core" - math "math" ) // Reference imports to suppress errors if they are not otherwise used. From d590d904ae62aeb313cd20c4148e47cf8b3d3484 Mon Sep 17 00:00:00 2001 From: Joe Kelley Date: Tue, 16 Jul 2019 16:27:15 -0400 Subject: [PATCH 11/17] add new mock --- pkg/code-generator/cmd/main.go | 2 +- pkg/code-generator/parser/parser_resource.go | 2 +- test/mocks/group/testing_event_loop_test.go | 6 +-- test/mocks/group/testing_snapshot.sk.go | 4 +- .../group/testing_snapshot_emitter.sk.go | 18 ++++---- .../group/testing_snapshot_emitter_test.go | 42 +++++++++---------- .../testing_snapshot_simple_emitter.sk.go | 4 +- test/mocks/v1/mock_resources.pb.go | 3 +- test/mocks/v1/more_mock_resources.pb.go | 3 +- test/mocks/v1alpha1/mock_resources.pb.go | 3 +- test/mocks/v2alpha1/mock_resources.pb.go | 3 +- test/mockstwo/api/solo-kit.json | 41 ++++++++++++++++++ 12 files changed, 84 insertions(+), 47 deletions(-) create mode 100644 test/mockstwo/api/solo-kit.json diff --git a/pkg/code-generator/cmd/main.go b/pkg/code-generator/cmd/main.go index bd430c40a..e561a76a4 100644 --- a/pkg/code-generator/cmd/main.go +++ b/pkg/code-generator/cmd/main.go @@ -587,7 +587,7 @@ func importCustomResources(imports []string) ([]model.CustomResourceConfig, erro for _, vc := range soloKitProject.ApiGroup.VersionConfigs { var customResources []model.CustomResourceConfig for _, cr := range vc.CustomResources { - cr.Package = soloKitProject.ApiGroup.ResourceGroupGoPackage + cr.Package = vc.GoPackage cr.Imported = true customResources = append(customResources, cr) } 
diff --git a/pkg/code-generator/parser/parser_resource.go b/pkg/code-generator/parser/parser_resource.go index 95f2b213f..5b041f2a6 100644 --- a/pkg/code-generator/parser/parser_resource.go +++ b/pkg/code-generator/parser/parser_resource.go @@ -36,7 +36,7 @@ type ProtoMessageWrapper struct { func getResource(resources []*model.Resource, cfg model.ResourceConfig) (*model.Resource, error) { matches := func(res *model.Resource) bool { - return res.Name == cfg.ResourceName //&& (res.ProtoPackage == cfg.ResourcePackage || res.GoPackage == cfg.ResourcePackage) + return res.Name == cfg.ResourceName && (res.ProtoPackage == cfg.ResourcePackage || res.GoPackage == cfg.ResourcePackage) } // collect all resources that match on package and name diff --git a/test/mocks/group/testing_event_loop_test.go b/test/mocks/group/testing_event_loop_test.go index ea32363d6..23934c80a 100644 --- a/test/mocks/group/testing_event_loop_test.go +++ b/test/mocks/group/testing_event_loop_test.go @@ -9,7 +9,7 @@ import ( "sync" "time" - github_com_solo_io_solo_kit_api_external_kubernetes_group "github.com/solo-io/solo-kit/api/external/kubernetes/group" + github_com_solo_io_solo_kit_pkg_api_v1_resources_common_kubernetes "github.com/solo-io/solo-kit/pkg/api/v1/resources/common/kubernetes" . "github.com/onsi/ginkgo" . 
"github.com/onsi/gomega" @@ -60,7 +60,7 @@ var _ = Describe("TestingEventLoop", func() { podClientFactory := &factory.MemoryResourceClientFactory{ Cache: memory.NewInMemoryResourceCache(), } - podClient, err := github_com_solo_io_solo_kit_api_external_kubernetes_group.NewPodClient(podClientFactory) + podClient, err := github_com_solo_io_solo_kit_pkg_api_v1_resources_common_kubernetes.NewPodClient(podClientFactory) Expect(err).NotTo(HaveOccurred()) emitter = NewTestingEmitter(mockResourceClient, fakeResourceClient, anotherMockResourceClient, clusterResourceClient, mockCustomTypeClient, podClient) @@ -76,7 +76,7 @@ var _ = Describe("TestingEventLoop", func() { Expect(err).NotTo(HaveOccurred()) _, err = emitter.MockCustomType().Write(NewMockCustomType(namespace, "jerry"), clients.WriteOpts{}) Expect(err).NotTo(HaveOccurred()) - _, err = emitter.Pod().Write(github_com_solo_io_solo_kit_api_external_kubernetes_group.NewPod(namespace, "jerry"), clients.WriteOpts{}) + _, err = emitter.Pod().Write(github_com_solo_io_solo_kit_pkg_api_v1_resources_common_kubernetes.NewPod(namespace, "jerry"), clients.WriteOpts{}) Expect(err).NotTo(HaveOccurred()) sync := &mockTestingSyncer{} el := NewTestingEventLoop(emitter, sync) diff --git a/test/mocks/group/testing_snapshot.sk.go b/test/mocks/group/testing_snapshot.sk.go index 6dd3cbb5a..6046e1b36 100644 --- a/test/mocks/group/testing_snapshot.sk.go +++ b/test/mocks/group/testing_snapshot.sk.go @@ -5,7 +5,7 @@ package group import ( "fmt" - github_com_solo_io_solo_kit_api_external_kubernetes_group "github.com/solo-io/solo-kit/api/external/kubernetes/group" + github_com_solo_io_solo_kit_pkg_api_v1_resources_common_kubernetes "github.com/solo-io/solo-kit/pkg/api/v1/resources/common/kubernetes" "github.com/solo-io/go-utils/hashutils" "go.uber.org/zap" @@ -17,7 +17,7 @@ type TestingSnapshot struct { Anothermockresources AnotherMockResourceList Clusterresources ClusterResourceList Mcts MockCustomTypeList - Pods 
github_com_solo_io_solo_kit_api_external_kubernetes_group.PodList + Pods github_com_solo_io_solo_kit_pkg_api_v1_resources_common_kubernetes.PodList } func (s TestingSnapshot) Clone() TestingSnapshot { diff --git a/test/mocks/group/testing_snapshot_emitter.sk.go b/test/mocks/group/testing_snapshot_emitter.sk.go index d69f714a3..c6961bd84 100644 --- a/test/mocks/group/testing_snapshot_emitter.sk.go +++ b/test/mocks/group/testing_snapshot_emitter.sk.go @@ -6,7 +6,7 @@ import ( "sync" "time" - github_com_solo_io_solo_kit_api_external_kubernetes_group "github.com/solo-io/solo-kit/api/external/kubernetes/group" + github_com_solo_io_solo_kit_pkg_api_v1_resources_common_kubernetes "github.com/solo-io/solo-kit/pkg/api/v1/resources/common/kubernetes" "go.opencensus.io/stats" "go.opencensus.io/stats/view" @@ -48,15 +48,15 @@ type TestingEmitter interface { AnotherMockResource() AnotherMockResourceClient ClusterResource() ClusterResourceClient MockCustomType() MockCustomTypeClient - Pod() github_com_solo_io_solo_kit_api_external_kubernetes_group.PodClient + Pod() github_com_solo_io_solo_kit_pkg_api_v1_resources_common_kubernetes.PodClient Snapshots(watchNamespaces []string, opts clients.WatchOpts) (<-chan *TestingSnapshot, <-chan error, error) } -func NewTestingEmitter(mockResourceClient MockResourceClient, fakeResourceClient FakeResourceClient, anotherMockResourceClient AnotherMockResourceClient, clusterResourceClient ClusterResourceClient, mockCustomTypeClient MockCustomTypeClient, podClient github_com_solo_io_solo_kit_api_external_kubernetes_group.PodClient) TestingEmitter { +func NewTestingEmitter(mockResourceClient MockResourceClient, fakeResourceClient FakeResourceClient, anotherMockResourceClient AnotherMockResourceClient, clusterResourceClient ClusterResourceClient, mockCustomTypeClient MockCustomTypeClient, podClient github_com_solo_io_solo_kit_pkg_api_v1_resources_common_kubernetes.PodClient) TestingEmitter { return NewTestingEmitterWithEmit(mockResourceClient, 
fakeResourceClient, anotherMockResourceClient, clusterResourceClient, mockCustomTypeClient, podClient, make(chan struct{})) } -func NewTestingEmitterWithEmit(mockResourceClient MockResourceClient, fakeResourceClient FakeResourceClient, anotherMockResourceClient AnotherMockResourceClient, clusterResourceClient ClusterResourceClient, mockCustomTypeClient MockCustomTypeClient, podClient github_com_solo_io_solo_kit_api_external_kubernetes_group.PodClient, emit <-chan struct{}) TestingEmitter { +func NewTestingEmitterWithEmit(mockResourceClient MockResourceClient, fakeResourceClient FakeResourceClient, anotherMockResourceClient AnotherMockResourceClient, clusterResourceClient ClusterResourceClient, mockCustomTypeClient MockCustomTypeClient, podClient github_com_solo_io_solo_kit_pkg_api_v1_resources_common_kubernetes.PodClient, emit <-chan struct{}) TestingEmitter { return &testingEmitter{ mockResource: mockResourceClient, fakeResource: fakeResourceClient, @@ -75,7 +75,7 @@ type testingEmitter struct { anotherMockResource AnotherMockResourceClient clusterResource ClusterResourceClient mockCustomType MockCustomTypeClient - pod github_com_solo_io_solo_kit_api_external_kubernetes_group.PodClient + pod github_com_solo_io_solo_kit_pkg_api_v1_resources_common_kubernetes.PodClient } func (c *testingEmitter) Register() error { @@ -120,7 +120,7 @@ func (c *testingEmitter) MockCustomType() MockCustomTypeClient { return c.mockCustomType } -func (c *testingEmitter) Pod() github_com_solo_io_solo_kit_api_external_kubernetes_group.PodClient { +func (c *testingEmitter) Pod() github_com_solo_io_solo_kit_pkg_api_v1_resources_common_kubernetes.PodClient { return c.pod } @@ -167,7 +167,7 @@ func (c *testingEmitter) Snapshots(watchNamespaces []string, opts clients.WatchO mockCustomTypeChan := make(chan mockCustomTypeListWithNamespace) /* Create channel for Pod */ type podListWithNamespace struct { - list github_com_solo_io_solo_kit_api_external_kubernetes_group.PodList + list 
github_com_solo_io_solo_kit_pkg_api_v1_resources_common_kubernetes.PodList namespace string } podChan := make(chan podListWithNamespace) @@ -300,7 +300,7 @@ func (c *testingEmitter) Snapshots(watchNamespaces []string, opts clients.WatchO fakesByNamespace := make(map[string]FakeResourceList) anothermockresourcesByNamespace := make(map[string]AnotherMockResourceList) mctsByNamespace := make(map[string]MockCustomTypeList) - podsByNamespace := make(map[string]github_com_solo_io_solo_kit_api_external_kubernetes_group.PodList) + podsByNamespace := make(map[string]github_com_solo_io_solo_kit_pkg_api_v1_resources_common_kubernetes.PodList) for { record := func() { stats.Record(ctx, mTestingSnapshotIn.M(1)) } @@ -374,7 +374,7 @@ func (c *testingEmitter) Snapshots(watchNamespaces []string, opts clients.WatchO // merge lists by namespace podsByNamespace[namespace] = podNamespacedList.list - var podList github_com_solo_io_solo_kit_api_external_kubernetes_group.PodList + var podList github_com_solo_io_solo_kit_pkg_api_v1_resources_common_kubernetes.PodList for _, pods := range podsByNamespace { podList = append(podList, pods...) } diff --git a/test/mocks/group/testing_snapshot_emitter_test.go b/test/mocks/group/testing_snapshot_emitter_test.go index ee5784a6b..420274d10 100644 --- a/test/mocks/group/testing_snapshot_emitter_test.go +++ b/test/mocks/group/testing_snapshot_emitter_test.go @@ -9,7 +9,7 @@ import ( "os" "time" - github_com_solo_io_solo_kit_api_external_kubernetes_group "github.com/solo-io/solo-kit/api/external/kubernetes/group" + github_com_solo_io_solo_kit_pkg_api_v1_resources_common_kubernetes "github.com/solo-io/solo-kit/pkg/api/v1/resources/common/kubernetes" . "github.com/onsi/ginkgo" . 
"github.com/onsi/gomega" @@ -47,7 +47,7 @@ var _ = Describe("GroupEmitter", func() { anotherMockResourceClient AnotherMockResourceClient clusterResourceClient ClusterResourceClient mockCustomTypeClient MockCustomTypeClient - podClient github_com_solo_io_solo_kit_api_external_kubernetes_group.PodClient + podClient github_com_solo_io_solo_kit_pkg_api_v1_resources_common_kubernetes.PodClient ) BeforeEach(func() { @@ -106,7 +106,7 @@ var _ = Describe("GroupEmitter", func() { Cache: memory.NewInMemoryResourceCache(), } - podClient, err = github_com_solo_io_solo_kit_api_external_kubernetes_group.NewPodClient(podClientFactory) + podClient, err = github_com_solo_io_solo_kit_pkg_api_v1_resources_common_kubernetes.NewPodClient(podClientFactory) Expect(err).NotTo(HaveOccurred()) emitter = NewTestingEmitter(mockResourceClient, fakeResourceClient, anotherMockResourceClient, clusterResourceClient, mockCustomTypeClient, podClient) }) @@ -408,7 +408,7 @@ var _ = Describe("GroupEmitter", func() { Pod */ - assertSnapshotpods := func(expectpods github_com_solo_io_solo_kit_api_external_kubernetes_group.PodList, unexpectpods github_com_solo_io_solo_kit_api_external_kubernetes_group.PodList) { + assertSnapshotpods := func(expectpods github_com_solo_io_solo_kit_pkg_api_v1_resources_common_kubernetes.PodList, unexpectpods github_com_solo_io_solo_kit_pkg_api_v1_resources_common_kubernetes.PodList) { drain: for { select { @@ -434,32 +434,32 @@ var _ = Describe("GroupEmitter", func() { } } } - pod1a, err := podClient.Write(github_com_solo_io_solo_kit_api_external_kubernetes_group.NewPod(namespace1, name1), clients.WriteOpts{Ctx: ctx}) + pod1a, err := podClient.Write(github_com_solo_io_solo_kit_pkg_api_v1_resources_common_kubernetes.NewPod(namespace1, name1), clients.WriteOpts{Ctx: ctx}) Expect(err).NotTo(HaveOccurred()) - pod1b, err := podClient.Write(github_com_solo_io_solo_kit_api_external_kubernetes_group.NewPod(namespace2, name1), clients.WriteOpts{Ctx: ctx}) + pod1b, err := 
podClient.Write(github_com_solo_io_solo_kit_pkg_api_v1_resources_common_kubernetes.NewPod(namespace2, name1), clients.WriteOpts{Ctx: ctx}) Expect(err).NotTo(HaveOccurred()) - assertSnapshotpods(github_com_solo_io_solo_kit_api_external_kubernetes_group.PodList{pod1a, pod1b}, nil) - pod2a, err := podClient.Write(github_com_solo_io_solo_kit_api_external_kubernetes_group.NewPod(namespace1, name2), clients.WriteOpts{Ctx: ctx}) + assertSnapshotpods(github_com_solo_io_solo_kit_pkg_api_v1_resources_common_kubernetes.PodList{pod1a, pod1b}, nil) + pod2a, err := podClient.Write(github_com_solo_io_solo_kit_pkg_api_v1_resources_common_kubernetes.NewPod(namespace1, name2), clients.WriteOpts{Ctx: ctx}) Expect(err).NotTo(HaveOccurred()) - pod2b, err := podClient.Write(github_com_solo_io_solo_kit_api_external_kubernetes_group.NewPod(namespace2, name2), clients.WriteOpts{Ctx: ctx}) + pod2b, err := podClient.Write(github_com_solo_io_solo_kit_pkg_api_v1_resources_common_kubernetes.NewPod(namespace2, name2), clients.WriteOpts{Ctx: ctx}) Expect(err).NotTo(HaveOccurred()) - assertSnapshotpods(github_com_solo_io_solo_kit_api_external_kubernetes_group.PodList{pod1a, pod1b, pod2a, pod2b}, nil) + assertSnapshotpods(github_com_solo_io_solo_kit_pkg_api_v1_resources_common_kubernetes.PodList{pod1a, pod1b, pod2a, pod2b}, nil) err = podClient.Delete(pod2a.GetMetadata().Namespace, pod2a.GetMetadata().Name, clients.DeleteOpts{Ctx: ctx}) Expect(err).NotTo(HaveOccurred()) err = podClient.Delete(pod2b.GetMetadata().Namespace, pod2b.GetMetadata().Name, clients.DeleteOpts{Ctx: ctx}) Expect(err).NotTo(HaveOccurred()) - assertSnapshotpods(github_com_solo_io_solo_kit_api_external_kubernetes_group.PodList{pod1a, pod1b}, github_com_solo_io_solo_kit_api_external_kubernetes_group.PodList{pod2a, pod2b}) + assertSnapshotpods(github_com_solo_io_solo_kit_pkg_api_v1_resources_common_kubernetes.PodList{pod1a, pod1b}, github_com_solo_io_solo_kit_pkg_api_v1_resources_common_kubernetes.PodList{pod2a, pod2b}) err = 
podClient.Delete(pod1a.GetMetadata().Namespace, pod1a.GetMetadata().Name, clients.DeleteOpts{Ctx: ctx}) Expect(err).NotTo(HaveOccurred()) err = podClient.Delete(pod1b.GetMetadata().Namespace, pod1b.GetMetadata().Name, clients.DeleteOpts{Ctx: ctx}) Expect(err).NotTo(HaveOccurred()) - assertSnapshotpods(nil, github_com_solo_io_solo_kit_api_external_kubernetes_group.PodList{pod1a, pod1b, pod2a, pod2b}) + assertSnapshotpods(nil, github_com_solo_io_solo_kit_pkg_api_v1_resources_common_kubernetes.PodList{pod1a, pod1b, pod2a, pod2b}) }) It("tracks snapshots on changes to any resource using AllNamespace", func() { ctx := context.Background() @@ -753,7 +753,7 @@ var _ = Describe("GroupEmitter", func() { Pod */ - assertSnapshotpods := func(expectpods github_com_solo_io_solo_kit_api_external_kubernetes_group.PodList, unexpectpods github_com_solo_io_solo_kit_api_external_kubernetes_group.PodList) { + assertSnapshotpods := func(expectpods github_com_solo_io_solo_kit_pkg_api_v1_resources_common_kubernetes.PodList, unexpectpods github_com_solo_io_solo_kit_pkg_api_v1_resources_common_kubernetes.PodList) { drain: for { select { @@ -779,31 +779,31 @@ var _ = Describe("GroupEmitter", func() { } } } - pod1a, err := podClient.Write(github_com_solo_io_solo_kit_api_external_kubernetes_group.NewPod(namespace1, name1), clients.WriteOpts{Ctx: ctx}) + pod1a, err := podClient.Write(github_com_solo_io_solo_kit_pkg_api_v1_resources_common_kubernetes.NewPod(namespace1, name1), clients.WriteOpts{Ctx: ctx}) Expect(err).NotTo(HaveOccurred()) - pod1b, err := podClient.Write(github_com_solo_io_solo_kit_api_external_kubernetes_group.NewPod(namespace2, name1), clients.WriteOpts{Ctx: ctx}) + pod1b, err := podClient.Write(github_com_solo_io_solo_kit_pkg_api_v1_resources_common_kubernetes.NewPod(namespace2, name1), clients.WriteOpts{Ctx: ctx}) Expect(err).NotTo(HaveOccurred()) - assertSnapshotpods(github_com_solo_io_solo_kit_api_external_kubernetes_group.PodList{pod1a, pod1b}, nil) - pod2a, err := 
podClient.Write(github_com_solo_io_solo_kit_api_external_kubernetes_group.NewPod(namespace1, name2), clients.WriteOpts{Ctx: ctx}) + assertSnapshotpods(github_com_solo_io_solo_kit_pkg_api_v1_resources_common_kubernetes.PodList{pod1a, pod1b}, nil) + pod2a, err := podClient.Write(github_com_solo_io_solo_kit_pkg_api_v1_resources_common_kubernetes.NewPod(namespace1, name2), clients.WriteOpts{Ctx: ctx}) Expect(err).NotTo(HaveOccurred()) - pod2b, err := podClient.Write(github_com_solo_io_solo_kit_api_external_kubernetes_group.NewPod(namespace2, name2), clients.WriteOpts{Ctx: ctx}) + pod2b, err := podClient.Write(github_com_solo_io_solo_kit_pkg_api_v1_resources_common_kubernetes.NewPod(namespace2, name2), clients.WriteOpts{Ctx: ctx}) Expect(err).NotTo(HaveOccurred()) - assertSnapshotpods(github_com_solo_io_solo_kit_api_external_kubernetes_group.PodList{pod1a, pod1b, pod2a, pod2b}, nil) + assertSnapshotpods(github_com_solo_io_solo_kit_pkg_api_v1_resources_common_kubernetes.PodList{pod1a, pod1b, pod2a, pod2b}, nil) err = podClient.Delete(pod2a.GetMetadata().Namespace, pod2a.GetMetadata().Name, clients.DeleteOpts{Ctx: ctx}) Expect(err).NotTo(HaveOccurred()) err = podClient.Delete(pod2b.GetMetadata().Namespace, pod2b.GetMetadata().Name, clients.DeleteOpts{Ctx: ctx}) Expect(err).NotTo(HaveOccurred()) - assertSnapshotpods(github_com_solo_io_solo_kit_api_external_kubernetes_group.PodList{pod1a, pod1b}, github_com_solo_io_solo_kit_api_external_kubernetes_group.PodList{pod2a, pod2b}) + assertSnapshotpods(github_com_solo_io_solo_kit_pkg_api_v1_resources_common_kubernetes.PodList{pod1a, pod1b}, github_com_solo_io_solo_kit_pkg_api_v1_resources_common_kubernetes.PodList{pod2a, pod2b}) err = podClient.Delete(pod1a.GetMetadata().Namespace, pod1a.GetMetadata().Name, clients.DeleteOpts{Ctx: ctx}) Expect(err).NotTo(HaveOccurred()) err = podClient.Delete(pod1b.GetMetadata().Namespace, pod1b.GetMetadata().Name, clients.DeleteOpts{Ctx: ctx}) Expect(err).NotTo(HaveOccurred()) - 
assertSnapshotpods(nil, github_com_solo_io_solo_kit_api_external_kubernetes_group.PodList{pod1a, pod1b, pod2a, pod2b}) + assertSnapshotpods(nil, github_com_solo_io_solo_kit_pkg_api_v1_resources_common_kubernetes.PodList{pod1a, pod1b, pod2a, pod2b}) }) }) diff --git a/test/mocks/group/testing_snapshot_simple_emitter.sk.go b/test/mocks/group/testing_snapshot_simple_emitter.sk.go index e5b751635..a236afcc1 100644 --- a/test/mocks/group/testing_snapshot_simple_emitter.sk.go +++ b/test/mocks/group/testing_snapshot_simple_emitter.sk.go @@ -7,7 +7,7 @@ import ( "fmt" "time" - github_com_solo_io_solo_kit_api_external_kubernetes_group "github.com/solo-io/solo-kit/api/external/kubernetes/group" + github_com_solo_io_solo_kit_pkg_api_v1_resources_common_kubernetes "github.com/solo-io/solo-kit/pkg/api/v1/resources/common/kubernetes" "go.opencensus.io/stats" @@ -93,7 +93,7 @@ func (c *testingSimpleEmitter) Snapshots(ctx context.Context) (<-chan *TestingSn currentSnapshot.Clusterresources = append(currentSnapshot.Clusterresources, typed) case *MockCustomType: currentSnapshot.Mcts = append(currentSnapshot.Mcts, typed) - case *github_com_solo_io_solo_kit_api_external_kubernetes_group.Pod: + case *github_com_solo_io_solo_kit_pkg_api_v1_resources_common_kubernetes.Pod: currentSnapshot.Pods = append(currentSnapshot.Pods, typed) default: select { diff --git a/test/mocks/v1/mock_resources.pb.go b/test/mocks/v1/mock_resources.pb.go index b2ac8cf7e..bef8472a9 100644 --- a/test/mocks/v1/mock_resources.pb.go +++ b/test/mocks/v1/mock_resources.pb.go @@ -11,8 +11,6 @@ import ( bytes "bytes" context "context" fmt "fmt" - math "math" - v2 "github.com/envoyproxy/go-control-plane/envoy/api/v2" _ "github.com/gogo/protobuf/gogoproto" proto "github.com/gogo/protobuf/proto" @@ -21,6 +19,7 @@ import ( grpc "google.golang.org/grpc" codes "google.golang.org/grpc/codes" status "google.golang.org/grpc/status" + math "math" ) // Reference imports to suppress errors if they are not otherwise used. 
diff --git a/test/mocks/v1/more_mock_resources.pb.go b/test/mocks/v1/more_mock_resources.pb.go index f598a1cec..70101aa4f 100644 --- a/test/mocks/v1/more_mock_resources.pb.go +++ b/test/mocks/v1/more_mock_resources.pb.go @@ -6,11 +6,10 @@ package v1 import ( bytes "bytes" fmt "fmt" - math "math" - _ "github.com/gogo/protobuf/gogoproto" proto "github.com/gogo/protobuf/proto" core "github.com/solo-io/solo-kit/pkg/api/v1/resources/core" + math "math" ) // Reference imports to suppress errors if they are not otherwise used. diff --git a/test/mocks/v1alpha1/mock_resources.pb.go b/test/mocks/v1alpha1/mock_resources.pb.go index 621c296aa..404fe856e 100644 --- a/test/mocks/v1alpha1/mock_resources.pb.go +++ b/test/mocks/v1alpha1/mock_resources.pb.go @@ -10,13 +10,12 @@ package v1alpha1 import ( bytes "bytes" fmt "fmt" - math "math" - _ "github.com/envoyproxy/go-control-plane/envoy/api/v2" _ "github.com/gogo/protobuf/gogoproto" proto "github.com/gogo/protobuf/proto" core "github.com/solo-io/solo-kit/pkg/api/v1/resources/core" _ "google.golang.org/genproto/googleapis/api/annotations" + math "math" ) // Reference imports to suppress errors if they are not otherwise used. diff --git a/test/mocks/v2alpha1/mock_resources.pb.go b/test/mocks/v2alpha1/mock_resources.pb.go index 144b025db..7c48754fa 100644 --- a/test/mocks/v2alpha1/mock_resources.pb.go +++ b/test/mocks/v2alpha1/mock_resources.pb.go @@ -6,11 +6,10 @@ package v2alpha1 import ( bytes "bytes" fmt "fmt" - math "math" - _ "github.com/gogo/protobuf/gogoproto" proto "github.com/gogo/protobuf/proto" core "github.com/solo-io/solo-kit/pkg/api/v1/resources/core" + math "math" ) // Reference imports to suppress errors if they are not otherwise used. 
diff --git a/test/mockstwo/api/solo-kit.json b/test/mockstwo/api/solo-kit.json new file mode 100644 index 000000000..af4cdd521 --- /dev/null +++ b/test/mockstwo/api/solo-kit.json @@ -0,0 +1,41 @@ +{ + "title": "Solo-Kit Testing Two", + "description": "mock solo-kit project", + "api_group": { + "name": "testingtwo.solo.io", + "docs_dir": "test/mockstwo/docs", + "conversion_go_package": "github.com/solo-io/solo-kit/test/mockstwo/conversion", + "resource_group_go_package": "github.com/solo-io/solo-kit/test/mockstwo/group", + "imports": [ + "github.com/solo-io/solo-kit/api/external/kubernetes" + ], + "resource_groups": { + "testing.solo.io": [ + { + "name": "MockResource", + "package": "testing.solo.io" + }, + { + "name": "FakeResource", + "package": "testing.solo.io" + }, + { + "name": "AnotherMockResource", + "package": "testing.solo.io" + }, + { + "name": "ClusterResource", + "package": "testing.solo.io" + }, + { + "name": "MockCustomType", + "package": "github.com/solo-io/solo-kit/test/mocks/api/v1/customtype" + }, + { + "name": "Pod", + "package": "github.com/solo-io/solo-kit/pkg/api/v1/resources/common/kubernetes" + } + ] + } + } +} \ No newline at end of file From dffee5518e2ad922236424473c019dc5247a80a5 Mon Sep 17 00:00:00 2001 From: Joe Kelley Date: Tue, 16 Jul 2019 18:28:05 -0400 Subject: [PATCH 12/17] allow out of package, gather all resources --- pkg/code-generator/cmd/main.go | 21 +- test/mockstwo/api/solo-kit.json | 1 - test/mockstwo/group/testing_event_loop.sk.go | 93 ++ .../mockstwo/group/testing_event_loop_test.go | 106 +++ .../group/testing_simple_event_loop.sk.go | 122 +++ test/mockstwo/group/testing_snapshot.sk.go | 138 +++ .../group/testing_snapshot_emitter.sk.go | 387 +++++++++ .../group/testing_snapshot_emitter_test.go | 810 ++++++++++++++++++ .../testing_snapshot_simple_emitter.sk.go | 113 +++ 9 files changed, 1784 insertions(+), 7 deletions(-) create mode 100644 test/mockstwo/group/testing_event_loop.sk.go create mode 100644 
test/mockstwo/group/testing_event_loop_test.go create mode 100644 test/mockstwo/group/testing_simple_event_loop.sk.go create mode 100644 test/mockstwo/group/testing_snapshot.sk.go create mode 100644 test/mockstwo/group/testing_snapshot_emitter.sk.go create mode 100644 test/mockstwo/group/testing_snapshot_emitter_test.go create mode 100644 test/mockstwo/group/testing_snapshot_simple_emitter.sk.go diff --git a/pkg/code-generator/cmd/main.go b/pkg/code-generator/cmd/main.go index e561a76a4..38d395dc9 100644 --- a/pkg/code-generator/cmd/main.go +++ b/pkg/code-generator/cmd/main.go @@ -140,9 +140,22 @@ func Generate(opts GenerateOptions) error { } } + // Accumulate all resources in order to handle cross-project dependencies + var allResources []*model.Resource for _, skp := range soloKitProjects { skp.ApiGroup.SoloKitProject = skp - // Store all projects for conversion generation. + for _, vc := range skp.ApiGroup.VersionConfigs { + vc.ApiGroup = skp.ApiGroup + version, err := parser.ProcessDescriptors(vc, skp.ApiGroup, protoDescriptors) + if err != nil { + return err + } + allResources = append(allResources, version.Resources...) + } + } + + for _, skp := range soloKitProjects { + // Store all versions on each api group for conversion generation. var apiGroupVersions []*model.Version for _, vc := range skp.ApiGroup.VersionConfigs { vc.ApiGroup = skp.ApiGroup @@ -195,16 +208,12 @@ func Generate(opts GenerateOptions) error { } if skp.ApiGroup.ResourceGroupGoPackage != "" { - var allResources []*model.Resource - for _, v := range apiGroupVersions { - allResources = append(allResources, v.Resources...) 
- } skp.ApiGroup.ResourceGroupsFoo, err = parser.GetResourceGroups(skp.ApiGroup, allResources) if err != nil { return err } - code, err := codegen.GenerateResourceGroupFiles(skp.ApiGroup, true, opts.SkipGeneratedTests) + code, err := codegen.GenerateResourceGroupFiles(skp.ApiGroup, false, opts.SkipGeneratedTests) if err != nil { return err } diff --git a/test/mockstwo/api/solo-kit.json b/test/mockstwo/api/solo-kit.json index af4cdd521..40354f64d 100644 --- a/test/mockstwo/api/solo-kit.json +++ b/test/mockstwo/api/solo-kit.json @@ -4,7 +4,6 @@ "api_group": { "name": "testingtwo.solo.io", "docs_dir": "test/mockstwo/docs", - "conversion_go_package": "github.com/solo-io/solo-kit/test/mockstwo/conversion", "resource_group_go_package": "github.com/solo-io/solo-kit/test/mockstwo/group", "imports": [ "github.com/solo-io/solo-kit/api/external/kubernetes" diff --git a/test/mockstwo/group/testing_event_loop.sk.go b/test/mockstwo/group/testing_event_loop.sk.go new file mode 100644 index 000000000..68de46e85 --- /dev/null +++ b/test/mockstwo/group/testing_event_loop.sk.go @@ -0,0 +1,93 @@ +// Code generated by solo-kit. DO NOT EDIT. 
+ +package group + +import ( + "context" + + "go.opencensus.io/trace" + + "github.com/hashicorp/go-multierror" + + "github.com/solo-io/go-utils/contextutils" + "github.com/solo-io/go-utils/errutils" + "github.com/solo-io/solo-kit/pkg/api/v1/clients" + "github.com/solo-io/solo-kit/pkg/api/v1/eventloop" + "github.com/solo-io/solo-kit/pkg/errors" +) + +type TestingSyncer interface { + Sync(context.Context, *TestingSnapshot) error +} + +type TestingSyncers []TestingSyncer + +func (s TestingSyncers) Sync(ctx context.Context, snapshot *TestingSnapshot) error { + var multiErr *multierror.Error + for _, syncer := range s { + if err := syncer.Sync(ctx, snapshot); err != nil { + multiErr = multierror.Append(multiErr, err) + } + } + return multiErr.ErrorOrNil() +} + +type testingEventLoop struct { + emitter TestingEmitter + syncer TestingSyncer +} + +func NewTestingEventLoop(emitter TestingEmitter, syncer TestingSyncer) eventloop.EventLoop { + return &testingEventLoop{ + emitter: emitter, + syncer: syncer, + } +} + +func (el *testingEventLoop) Run(namespaces []string, opts clients.WatchOpts) (<-chan error, error) { + opts = opts.WithDefaults() + opts.Ctx = contextutils.WithLogger(opts.Ctx, "group.event_loop") + logger := contextutils.LoggerFrom(opts.Ctx) + logger.Infof("event loop started") + + errs := make(chan error) + + watch, emitterErrs, err := el.emitter.Snapshots(namespaces, opts) + if err != nil { + return nil, errors.Wrapf(err, "starting snapshot watch") + } + go errutils.AggregateErrs(opts.Ctx, errs, emitterErrs, "group.emitter errors") + go func() { + // create a new context for each loop, cancel it before each loop + var cancel context.CancelFunc = func() {} + // use closure to allow cancel function to be updated as context changes + defer func() { cancel() }() + for { + select { + case snapshot, ok := <-watch: + if !ok { + return + } + // cancel any open watches from previous loop + cancel() + + ctx, span := trace.StartSpan(opts.Ctx, 
"testing.solo.io.EventLoopSync") + ctx, canc := context.WithCancel(ctx) + cancel = canc + err := el.syncer.Sync(ctx, snapshot) + span.End() + + if err != nil { + select { + case errs <- err: + default: + logger.Errorf("write error channel is full! could not propagate err: %v", err) + } + } + case <-opts.Ctx.Done(): + return + } + } + }() + return errs, nil +} diff --git a/test/mockstwo/group/testing_event_loop_test.go b/test/mockstwo/group/testing_event_loop_test.go new file mode 100644 index 000000000..344294f2d --- /dev/null +++ b/test/mockstwo/group/testing_event_loop_test.go @@ -0,0 +1,106 @@ +// Code generated by solo-kit. DO NOT EDIT. + +// +build solokit + +package group + +import ( + "context" + "sync" + "time" + + github_com_solo_io_solo_kit_pkg_api_v1_resources_common_kubernetes "github.com/solo-io/solo-kit/pkg/api/v1/resources/common/kubernetes" + testing_solo_io "github.com/solo-io/solo-kit/test/mocks/v1" + + . "github.com/onsi/ginkgo" + . "github.com/onsi/gomega" + "github.com/solo-io/solo-kit/pkg/api/v1/clients" + "github.com/solo-io/solo-kit/pkg/api/v1/clients/factory" + "github.com/solo-io/solo-kit/pkg/api/v1/clients/memory" +) + +var _ = Describe("TestingEventLoop", func() { + var ( + namespace string + emitter TestingEmitter + err error + ) + + BeforeEach(func() { + + mockResourceClientFactory := &factory.MemoryResourceClientFactory{ + Cache: memory.NewInMemoryResourceCache(), + } + mockResourceClient, err := testing_solo_io.NewMockResourceClient(mockResourceClientFactory) + Expect(err).NotTo(HaveOccurred()) + + fakeResourceClientFactory := &factory.MemoryResourceClientFactory{ + Cache: memory.NewInMemoryResourceCache(), + } + fakeResourceClient, err := testing_solo_io.NewFakeResourceClient(fakeResourceClientFactory) + Expect(err).NotTo(HaveOccurred()) + + anotherMockResourceClientFactory := &factory.MemoryResourceClientFactory{ + Cache: memory.NewInMemoryResourceCache(), + } + anotherMockResourceClient, err := 
testing_solo_io.NewAnotherMockResourceClient(anotherMockResourceClientFactory) + Expect(err).NotTo(HaveOccurred()) + + clusterResourceClientFactory := &factory.MemoryResourceClientFactory{ + Cache: memory.NewInMemoryResourceCache(), + } + clusterResourceClient, err := testing_solo_io.NewClusterResourceClient(clusterResourceClientFactory) + Expect(err).NotTo(HaveOccurred()) + + mockCustomTypeClientFactory := &factory.MemoryResourceClientFactory{ + Cache: memory.NewInMemoryResourceCache(), + } + mockCustomTypeClient, err := NewMockCustomTypeClient(mockCustomTypeClientFactory) + Expect(err).NotTo(HaveOccurred()) + + podClientFactory := &factory.MemoryResourceClientFactory{ + Cache: memory.NewInMemoryResourceCache(), + } + podClient, err := github_com_solo_io_solo_kit_pkg_api_v1_resources_common_kubernetes.NewPodClient(podClientFactory) + Expect(err).NotTo(HaveOccurred()) + + emitter = NewTestingEmitter(mockResourceClient, fakeResourceClient, anotherMockResourceClient, clusterResourceClient, mockCustomTypeClient, podClient) + }) + It("runs sync function on a new snapshot", func() { + _, err = emitter.MockResource().Write(testing_solo_io.NewMockResource(namespace, "jerry"), clients.WriteOpts{}) + Expect(err).NotTo(HaveOccurred()) + _, err = emitter.FakeResource().Write(testing_solo_io.NewFakeResource(namespace, "jerry"), clients.WriteOpts{}) + Expect(err).NotTo(HaveOccurred()) + _, err = emitter.AnotherMockResource().Write(testing_solo_io.NewAnotherMockResource(namespace, "jerry"), clients.WriteOpts{}) + Expect(err).NotTo(HaveOccurred()) + _, err = emitter.ClusterResource().Write(testing_solo_io.NewClusterResource(namespace, "jerry"), clients.WriteOpts{}) + Expect(err).NotTo(HaveOccurred()) + _, err = emitter.MockCustomType().Write(NewMockCustomType(namespace, "jerry"), clients.WriteOpts{}) + Expect(err).NotTo(HaveOccurred()) + _, err = emitter.Pod().Write(github_com_solo_io_solo_kit_pkg_api_v1_resources_common_kubernetes.NewPod(namespace, "jerry"), clients.WriteOpts{}) 
+ Expect(err).NotTo(HaveOccurred()) + sync := &mockTestingSyncer{} + el := NewTestingEventLoop(emitter, sync) + _, err := el.Run([]string{namespace}, clients.WatchOpts{}) + Expect(err).NotTo(HaveOccurred()) + Eventually(sync.Synced, 5*time.Second).Should(BeTrue()) + }) +}) + +type mockTestingSyncer struct { + synced bool + mutex sync.Mutex +} + +func (s *mockTestingSyncer) Synced() bool { + s.mutex.Lock() + defer s.mutex.Unlock() + return s.synced +} + +func (s *mockTestingSyncer) Sync(ctx context.Context, snap *TestingSnapshot) error { + s.mutex.Lock() + s.synced = true + s.mutex.Unlock() + return nil +} diff --git a/test/mockstwo/group/testing_simple_event_loop.sk.go b/test/mockstwo/group/testing_simple_event_loop.sk.go new file mode 100644 index 000000000..1109a3f6a --- /dev/null +++ b/test/mockstwo/group/testing_simple_event_loop.sk.go @@ -0,0 +1,122 @@ +// Code generated by solo-kit. DO NOT EDIT. + +package group + +import ( + "context" + "fmt" + + "go.opencensus.io/trace" + + "github.com/solo-io/go-utils/contextutils" + "github.com/solo-io/go-utils/errutils" + "github.com/solo-io/solo-kit/pkg/api/v1/eventloop" + "github.com/solo-io/solo-kit/pkg/errors" +) + +// SyncDeciders Syncer which implements this interface +// can make smarter decisions over whether +// it should be restarted (including having its context cancelled) +// based on a diff of the previous and current snapshot + +// Deprecated: use TestingSyncDeciderWithContext +type TestingSyncDecider interface { + TestingSyncer + ShouldSync(old, new *TestingSnapshot) bool +} + +type TestingSyncDeciderWithContext interface { + TestingSyncer + ShouldSync(ctx context.Context, old, new *TestingSnapshot) bool +} + +type testingSimpleEventLoop struct { + emitter TestingSimpleEmitter + syncers []TestingSyncer +} + +func NewTestingSimpleEventLoop(emitter TestingSimpleEmitter, syncers ...TestingSyncer) eventloop.SimpleEventLoop { + return &testingSimpleEventLoop{ + emitter: emitter, + syncers: syncers, + } +} + 
+func (el *testingSimpleEventLoop) Run(ctx context.Context) (<-chan error, error) { + ctx = contextutils.WithLogger(ctx, "group.event_loop") + logger := contextutils.LoggerFrom(ctx) + logger.Infof("event loop started") + + errs := make(chan error) + + watch, emitterErrs, err := el.emitter.Snapshots(ctx) + if err != nil { + return nil, errors.Wrapf(err, "starting snapshot watch") + } + + go errutils.AggregateErrs(ctx, errs, emitterErrs, "group.emitter errors") + go func() { + // create a new context for each syncer for each loop, cancel each before each loop + syncerCancels := make(map[TestingSyncer]context.CancelFunc) + + // use closure to allow cancel function to be updated as context changes + defer func() { + for _, cancel := range syncerCancels { + cancel() + } + }() + + // cache the previous snapshot for comparison + var previousSnapshot *TestingSnapshot + + for { + select { + case snapshot, ok := <-watch: + if !ok { + return + } + + // cancel any open watches from previous loop + for _, syncer := range el.syncers { + // allow the syncer to decide if we should sync it + cancel its previous context + if syncDecider, isDecider := syncer.(TestingSyncDecider); isDecider { + if shouldSync := syncDecider.ShouldSync(previousSnapshot, snapshot); !shouldSync { + continue // skip syncing this syncer + } + } else if syncDeciderWithContext, isDecider := syncer.(TestingSyncDeciderWithContext); isDecider { + if shouldSync := syncDeciderWithContext.ShouldSync(ctx, previousSnapshot, snapshot); !shouldSync { + continue // skip syncing this syncer + } + } + + // if this syncer had a previous context, cancel it + cancel, ok := syncerCancels[syncer] + if ok { + cancel() + } + + ctx, span := trace.StartSpan(ctx, fmt.Sprintf("testing.solo.io.SimpleEventLoopSync-%T", syncer)) + ctx, canc := context.WithCancel(ctx) + err := syncer.Sync(ctx, snapshot) + span.End() + + if err != nil { + select { + case errs <- err: + default: + logger.Errorf("write error channel is full! 
could not propagate err: %v", err) + } + } + + syncerCancels[syncer] = canc + } + + previousSnapshot = snapshot + + case <-ctx.Done(): + return + } + } + }() + return errs, nil +} diff --git a/test/mockstwo/group/testing_snapshot.sk.go b/test/mockstwo/group/testing_snapshot.sk.go new file mode 100644 index 000000000..87db91fc5 --- /dev/null +++ b/test/mockstwo/group/testing_snapshot.sk.go @@ -0,0 +1,138 @@ +// Code generated by solo-kit. DO NOT EDIT. + +package group + +import ( + "fmt" + + github_com_solo_io_solo_kit_pkg_api_v1_resources_common_kubernetes "github.com/solo-io/solo-kit/pkg/api/v1/resources/common/kubernetes" + testing_solo_io "github.com/solo-io/solo-kit/test/mocks/v1" + + "github.com/solo-io/go-utils/hashutils" + "go.uber.org/zap" +) + +type TestingSnapshot struct { + Mocks testing_solo_io.MockResourceList + Fakes testing_solo_io.FakeResourceList + Anothermockresources testing_solo_io.AnotherMockResourceList + Clusterresources testing_solo_io.ClusterResourceList + Mcts MockCustomTypeList + Pods github_com_solo_io_solo_kit_pkg_api_v1_resources_common_kubernetes.PodList +} + +func (s TestingSnapshot) Clone() TestingSnapshot { + return TestingSnapshot{ + Mocks: s.Mocks.Clone(), + Fakes: s.Fakes.Clone(), + Anothermockresources: s.Anothermockresources.Clone(), + Clusterresources: s.Clusterresources.Clone(), + Mcts: s.Mcts.Clone(), + Pods: s.Pods.Clone(), + } +} + +func (s TestingSnapshot) Hash() uint64 { + return hashutils.HashAll( + s.hashMocks(), + s.hashFakes(), + s.hashAnothermockresources(), + s.hashClusterresources(), + s.hashMcts(), + s.hashPods(), + ) +} + +func (s TestingSnapshot) hashMocks() uint64 { + return hashutils.HashAll(s.Mocks.AsInterfaces()...) +} + +func (s TestingSnapshot) hashFakes() uint64 { + return hashutils.HashAll(s.Fakes.AsInterfaces()...) +} + +func (s TestingSnapshot) hashAnothermockresources() uint64 { + return hashutils.HashAll(s.Anothermockresources.AsInterfaces()...) 
+} + +func (s TestingSnapshot) hashClusterresources() uint64 { + return hashutils.HashAll(s.Clusterresources.AsInterfaces()...) +} + +func (s TestingSnapshot) hashMcts() uint64 { + return hashutils.HashAll(s.Mcts.AsInterfaces()...) +} + +func (s TestingSnapshot) hashPods() uint64 { + return hashutils.HashAll(s.Pods.AsInterfaces()...) +} + +func (s TestingSnapshot) HashFields() []zap.Field { + var fields []zap.Field + fields = append(fields, zap.Uint64("mocks", s.hashMocks())) + fields = append(fields, zap.Uint64("fakes", s.hashFakes())) + fields = append(fields, zap.Uint64("anothermockresources", s.hashAnothermockresources())) + fields = append(fields, zap.Uint64("clusterresources", s.hashClusterresources())) + fields = append(fields, zap.Uint64("mcts", s.hashMcts())) + fields = append(fields, zap.Uint64("pods", s.hashPods())) + + return append(fields, zap.Uint64("snapshotHash", s.Hash())) +} + +type TestingSnapshotStringer struct { + Version uint64 + Mocks []string + Fakes []string + Anothermockresources []string + Clusterresources []string + Mcts []string + Pods []string +} + +func (ss TestingSnapshotStringer) String() string { + s := fmt.Sprintf("TestingSnapshot %v\n", ss.Version) + + s += fmt.Sprintf(" Mocks %v\n", len(ss.Mocks)) + for _, name := range ss.Mocks { + s += fmt.Sprintf(" %v\n", name) + } + + s += fmt.Sprintf(" Fakes %v\n", len(ss.Fakes)) + for _, name := range ss.Fakes { + s += fmt.Sprintf(" %v\n", name) + } + + s += fmt.Sprintf(" Anothermockresources %v\n", len(ss.Anothermockresources)) + for _, name := range ss.Anothermockresources { + s += fmt.Sprintf(" %v\n", name) + } + + s += fmt.Sprintf(" Clusterresources %v\n", len(ss.Clusterresources)) + for _, name := range ss.Clusterresources { + s += fmt.Sprintf(" %v\n", name) + } + + s += fmt.Sprintf(" Mcts %v\n", len(ss.Mcts)) + for _, name := range ss.Mcts { + s += fmt.Sprintf(" %v\n", name) + } + + s += fmt.Sprintf(" Pods %v\n", len(ss.Pods)) + for _, name := range ss.Pods { + s += fmt.Sprintf(" 
%v\n", name) + } + + return s +} + +func (s TestingSnapshot) Stringer() TestingSnapshotStringer { + return TestingSnapshotStringer{ + Version: s.Hash(), + Mocks: s.Mocks.NamespacesDotNames(), + Fakes: s.Fakes.NamespacesDotNames(), + Anothermockresources: s.Anothermockresources.NamespacesDotNames(), + Clusterresources: s.Clusterresources.Names(), + Mcts: s.Mcts.NamespacesDotNames(), + Pods: s.Pods.NamespacesDotNames(), + } +} diff --git a/test/mockstwo/group/testing_snapshot_emitter.sk.go b/test/mockstwo/group/testing_snapshot_emitter.sk.go new file mode 100644 index 000000000..9a55d827a --- /dev/null +++ b/test/mockstwo/group/testing_snapshot_emitter.sk.go @@ -0,0 +1,387 @@ +// Code generated by solo-kit. DO NOT EDIT. + +package group + +import ( + "sync" + "time" + + github_com_solo_io_solo_kit_pkg_api_v1_resources_common_kubernetes "github.com/solo-io/solo-kit/pkg/api/v1/resources/common/kubernetes" + testing_solo_io "github.com/solo-io/solo-kit/test/mocks/v1" + + "go.opencensus.io/stats" + "go.opencensus.io/stats/view" + "go.opencensus.io/tag" + + "github.com/solo-io/go-utils/errutils" + "github.com/solo-io/solo-kit/pkg/api/v1/clients" + "github.com/solo-io/solo-kit/pkg/errors" +) + +var ( + mTestingSnapshotIn = stats.Int64("testing.solo.io/snap_emitter/snap_in", "The number of snapshots in", "1") + mTestingSnapshotOut = stats.Int64("testing.solo.io/snap_emitter/snap_out", "The number of snapshots out", "1") + + testingsnapshotInView = &view.View{ + Name: "testing.solo.io_snap_emitter/snap_in", + Measure: mTestingSnapshotIn, + Description: "The number of snapshots updates coming in", + Aggregation: view.Count(), + TagKeys: []tag.Key{}, + } + testingsnapshotOutView = &view.View{ + Name: "testing.solo.io/snap_emitter/snap_out", + Measure: mTestingSnapshotOut, + Description: "The number of snapshots updates going out", + Aggregation: view.Count(), + TagKeys: []tag.Key{}, + } +) + +func init() { + view.Register(testingsnapshotInView, testingsnapshotOutView) +} + 
+type TestingEmitter interface { + Register() error + MockResource() testing_solo_io.MockResourceClient + FakeResource() testing_solo_io.FakeResourceClient + AnotherMockResource() testing_solo_io.AnotherMockResourceClient + ClusterResource() testing_solo_io.ClusterResourceClient + MockCustomType() MockCustomTypeClient + Pod() github_com_solo_io_solo_kit_pkg_api_v1_resources_common_kubernetes.PodClient + Snapshots(watchNamespaces []string, opts clients.WatchOpts) (<-chan *TestingSnapshot, <-chan error, error) +} + +func NewTestingEmitter(mockResourceClient testing_solo_io.MockResourceClient, fakeResourceClient testing_solo_io.FakeResourceClient, anotherMockResourceClient testing_solo_io.AnotherMockResourceClient, clusterResourceClient testing_solo_io.ClusterResourceClient, mockCustomTypeClient MockCustomTypeClient, podClient github_com_solo_io_solo_kit_pkg_api_v1_resources_common_kubernetes.PodClient) TestingEmitter { + return NewTestingEmitterWithEmit(mockResourceClient, fakeResourceClient, anotherMockResourceClient, clusterResourceClient, mockCustomTypeClient, podClient, make(chan struct{})) +} + +func NewTestingEmitterWithEmit(mockResourceClient testing_solo_io.MockResourceClient, fakeResourceClient testing_solo_io.FakeResourceClient, anotherMockResourceClient testing_solo_io.AnotherMockResourceClient, clusterResourceClient testing_solo_io.ClusterResourceClient, mockCustomTypeClient MockCustomTypeClient, podClient github_com_solo_io_solo_kit_pkg_api_v1_resources_common_kubernetes.PodClient, emit <-chan struct{}) TestingEmitter { + return &testingEmitter{ + mockResource: mockResourceClient, + fakeResource: fakeResourceClient, + anotherMockResource: anotherMockResourceClient, + clusterResource: clusterResourceClient, + mockCustomType: mockCustomTypeClient, + pod: podClient, + forceEmit: emit, + } +} + +type testingEmitter struct { + forceEmit <-chan struct{} + mockResource testing_solo_io.MockResourceClient + fakeResource testing_solo_io.FakeResourceClient + 
anotherMockResource testing_solo_io.AnotherMockResourceClient + clusterResource testing_solo_io.ClusterResourceClient + mockCustomType MockCustomTypeClient + pod github_com_solo_io_solo_kit_pkg_api_v1_resources_common_kubernetes.PodClient +} + +func (c *testingEmitter) Register() error { + if err := c.mockResource.Register(); err != nil { + return err + } + if err := c.fakeResource.Register(); err != nil { + return err + } + if err := c.anotherMockResource.Register(); err != nil { + return err + } + if err := c.clusterResource.Register(); err != nil { + return err + } + if err := c.mockCustomType.Register(); err != nil { + return err + } + if err := c.pod.Register(); err != nil { + return err + } + return nil +} + +func (c *testingEmitter) MockResource() testing_solo_io.MockResourceClient { + return c.mockResource +} + +func (c *testingEmitter) FakeResource() testing_solo_io.FakeResourceClient { + return c.fakeResource +} + +func (c *testingEmitter) AnotherMockResource() testing_solo_io.AnotherMockResourceClient { + return c.anotherMockResource +} + +func (c *testingEmitter) ClusterResource() testing_solo_io.ClusterResourceClient { + return c.clusterResource +} + +func (c *testingEmitter) MockCustomType() MockCustomTypeClient { + return c.mockCustomType +} + +func (c *testingEmitter) Pod() github_com_solo_io_solo_kit_pkg_api_v1_resources_common_kubernetes.PodClient { + return c.pod +} + +func (c *testingEmitter) Snapshots(watchNamespaces []string, opts clients.WatchOpts) (<-chan *TestingSnapshot, <-chan error, error) { + + if len(watchNamespaces) == 0 { + watchNamespaces = []string{""} + } + + for _, ns := range watchNamespaces { + if ns == "" && len(watchNamespaces) > 1 { + return nil, nil, errors.Errorf("the \"\" namespace is used to watch all namespaces. 
Snapshots can either be tracked for " + + "specific namespaces or \"\" AllNamespaces, but not both.") + } + } + + errs := make(chan error) + var done sync.WaitGroup + ctx := opts.Ctx + /* Create channel for MockResource */ + type mockResourceListWithNamespace struct { + list testing_solo_io.MockResourceList + namespace string + } + mockResourceChan := make(chan mockResourceListWithNamespace) + /* Create channel for FakeResource */ + type fakeResourceListWithNamespace struct { + list testing_solo_io.FakeResourceList + namespace string + } + fakeResourceChan := make(chan fakeResourceListWithNamespace) + /* Create channel for AnotherMockResource */ + type anotherMockResourceListWithNamespace struct { + list testing_solo_io.AnotherMockResourceList + namespace string + } + anotherMockResourceChan := make(chan anotherMockResourceListWithNamespace) + /* Create channel for ClusterResource */ + /* Create channel for MockCustomType */ + type mockCustomTypeListWithNamespace struct { + list MockCustomTypeList + namespace string + } + mockCustomTypeChan := make(chan mockCustomTypeListWithNamespace) + /* Create channel for Pod */ + type podListWithNamespace struct { + list github_com_solo_io_solo_kit_pkg_api_v1_resources_common_kubernetes.PodList + namespace string + } + podChan := make(chan podListWithNamespace) + + for _, namespace := range watchNamespaces { + /* Setup namespaced watch for MockResource */ + mockResourceNamespacesChan, mockResourceErrs, err := c.mockResource.Watch(namespace, opts) + if err != nil { + return nil, nil, errors.Wrapf(err, "starting MockResource watch") + } + + done.Add(1) + go func(namespace string) { + defer done.Done() + errutils.AggregateErrs(ctx, errs, mockResourceErrs, namespace+"-mocks") + }(namespace) + /* Setup namespaced watch for FakeResource */ + fakeResourceNamespacesChan, fakeResourceErrs, err := c.fakeResource.Watch(namespace, opts) + if err != nil { + return nil, nil, errors.Wrapf(err, "starting FakeResource watch") + } + + 
done.Add(1) + go func(namespace string) { + defer done.Done() + errutils.AggregateErrs(ctx, errs, fakeResourceErrs, namespace+"-fakes") + }(namespace) + /* Setup namespaced watch for AnotherMockResource */ + anotherMockResourceNamespacesChan, anotherMockResourceErrs, err := c.anotherMockResource.Watch(namespace, opts) + if err != nil { + return nil, nil, errors.Wrapf(err, "starting AnotherMockResource watch") + } + + done.Add(1) + go func(namespace string) { + defer done.Done() + errutils.AggregateErrs(ctx, errs, anotherMockResourceErrs, namespace+"-anothermockresources") + }(namespace) + /* Setup namespaced watch for MockCustomType */ + mockCustomTypeNamespacesChan, mockCustomTypeErrs, err := c.mockCustomType.Watch(namespace, opts) + if err != nil { + return nil, nil, errors.Wrapf(err, "starting MockCustomType watch") + } + + done.Add(1) + go func(namespace string) { + defer done.Done() + errutils.AggregateErrs(ctx, errs, mockCustomTypeErrs, namespace+"-mcts") + }(namespace) + /* Setup namespaced watch for Pod */ + podNamespacesChan, podErrs, err := c.pod.Watch(namespace, opts) + if err != nil { + return nil, nil, errors.Wrapf(err, "starting Pod watch") + } + + done.Add(1) + go func(namespace string) { + defer done.Done() + errutils.AggregateErrs(ctx, errs, podErrs, namespace+"-pods") + }(namespace) + + /* Watch for changes and update snapshot */ + go func(namespace string) { + for { + select { + case <-ctx.Done(): + return + case mockResourceList := <-mockResourceNamespacesChan: + select { + case <-ctx.Done(): + return + case mockResourceChan <- mockResourceListWithNamespace{list: mockResourceList, namespace: namespace}: + } + case fakeResourceList := <-fakeResourceNamespacesChan: + select { + case <-ctx.Done(): + return + case fakeResourceChan <- fakeResourceListWithNamespace{list: fakeResourceList, namespace: namespace}: + } + case anotherMockResourceList := <-anotherMockResourceNamespacesChan: + select { + case <-ctx.Done(): + return + case 
anotherMockResourceChan <- anotherMockResourceListWithNamespace{list: anotherMockResourceList, namespace: namespace}: + } + case mockCustomTypeList := <-mockCustomTypeNamespacesChan: + select { + case <-ctx.Done(): + return + case mockCustomTypeChan <- mockCustomTypeListWithNamespace{list: mockCustomTypeList, namespace: namespace}: + } + case podList := <-podNamespacesChan: + select { + case <-ctx.Done(): + return + case podChan <- podListWithNamespace{list: podList, namespace: namespace}: + } + } + } + }(namespace) + } + /* Setup cluster-wide watch for ClusterResource */ + + clusterResourceChan, clusterResourceErrs, err := c.clusterResource.Watch(opts) + if err != nil { + return nil, nil, errors.Wrapf(err, "starting ClusterResource watch") + } + done.Add(1) + go func() { + defer done.Done() + errutils.AggregateErrs(ctx, errs, clusterResourceErrs, "clusterresources") + }() + + snapshots := make(chan *TestingSnapshot) + go func() { + originalSnapshot := TestingSnapshot{} + currentSnapshot := originalSnapshot.Clone() + timer := time.NewTicker(time.Second * 1) + sync := func() { + if originalSnapshot.Hash() == currentSnapshot.Hash() { + return + } + + stats.Record(ctx, mTestingSnapshotOut.M(1)) + originalSnapshot = currentSnapshot.Clone() + sentSnapshot := currentSnapshot.Clone() + snapshots <- &sentSnapshot + } + mocksByNamespace := make(map[string]testing_solo_io.MockResourceList) + fakesByNamespace := make(map[string]testing_solo_io.FakeResourceList) + anothermockresourcesByNamespace := make(map[string]testing_solo_io.AnotherMockResourceList) + mctsByNamespace := make(map[string]MockCustomTypeList) + podsByNamespace := make(map[string]github_com_solo_io_solo_kit_pkg_api_v1_resources_common_kubernetes.PodList) + + for { + record := func() { stats.Record(ctx, mTestingSnapshotIn.M(1)) } + + select { + case <-timer.C: + sync() + case <-ctx.Done(): + close(snapshots) + done.Wait() + close(errs) + return + case <-c.forceEmit: + sentSnapshot := currentSnapshot.Clone() + 
snapshots <- &sentSnapshot + case mockResourceNamespacedList := <-mockResourceChan: + record() + + namespace := mockResourceNamespacedList.namespace + + // merge lists by namespace + mocksByNamespace[namespace] = mockResourceNamespacedList.list + var mockResourceList testing_solo_io.MockResourceList + for _, mocks := range mocksByNamespace { + mockResourceList = append(mockResourceList, mocks...) + } + currentSnapshot.Mocks = mockResourceList.Sort() + case fakeResourceNamespacedList := <-fakeResourceChan: + record() + + namespace := fakeResourceNamespacedList.namespace + + // merge lists by namespace + fakesByNamespace[namespace] = fakeResourceNamespacedList.list + var fakeResourceList testing_solo_io.FakeResourceList + for _, fakes := range fakesByNamespace { + fakeResourceList = append(fakeResourceList, fakes...) + } + currentSnapshot.Fakes = fakeResourceList.Sort() + case anotherMockResourceNamespacedList := <-anotherMockResourceChan: + record() + + namespace := anotherMockResourceNamespacedList.namespace + + // merge lists by namespace + anothermockresourcesByNamespace[namespace] = anotherMockResourceNamespacedList.list + var anotherMockResourceList testing_solo_io.AnotherMockResourceList + for _, anothermockresources := range anothermockresourcesByNamespace { + anotherMockResourceList = append(anotherMockResourceList, anothermockresources...) + } + currentSnapshot.Anothermockresources = anotherMockResourceList.Sort() + case clusterResourceList := <-clusterResourceChan: + record() + currentSnapshot.Clusterresources = clusterResourceList + case mockCustomTypeNamespacedList := <-mockCustomTypeChan: + record() + + namespace := mockCustomTypeNamespacedList.namespace + + // merge lists by namespace + mctsByNamespace[namespace] = mockCustomTypeNamespacedList.list + var mockCustomTypeList MockCustomTypeList + for _, mcts := range mctsByNamespace { + mockCustomTypeList = append(mockCustomTypeList, mcts...) 
+ } + currentSnapshot.Mcts = mockCustomTypeList.Sort() + case podNamespacedList := <-podChan: + record() + + namespace := podNamespacedList.namespace + + // merge lists by namespace + podsByNamespace[namespace] = podNamespacedList.list + var podList github_com_solo_io_solo_kit_pkg_api_v1_resources_common_kubernetes.PodList + for _, pods := range podsByNamespace { + podList = append(podList, pods...) + } + currentSnapshot.Pods = podList.Sort() + } + } + }() + return snapshots, errs, nil +} diff --git a/test/mockstwo/group/testing_snapshot_emitter_test.go b/test/mockstwo/group/testing_snapshot_emitter_test.go new file mode 100644 index 000000000..1ddab63e7 --- /dev/null +++ b/test/mockstwo/group/testing_snapshot_emitter_test.go @@ -0,0 +1,810 @@ +// Code generated by solo-kit. DO NOT EDIT. + +// +build solokit + +package group + +import ( + "context" + "os" + "time" + + github_com_solo_io_solo_kit_pkg_api_v1_resources_common_kubernetes "github.com/solo-io/solo-kit/pkg/api/v1/resources/common/kubernetes" + testing_solo_io "github.com/solo-io/solo-kit/test/mocks/v1" + + . "github.com/onsi/ginkgo" + . 
"github.com/onsi/gomega" + "github.com/solo-io/go-utils/kubeutils" + "github.com/solo-io/go-utils/log" + "github.com/solo-io/solo-kit/pkg/api/v1/clients" + "github.com/solo-io/solo-kit/pkg/api/v1/clients/factory" + kuberc "github.com/solo-io/solo-kit/pkg/api/v1/clients/kube" + "github.com/solo-io/solo-kit/pkg/api/v1/clients/memory" + "github.com/solo-io/solo-kit/test/helpers" + "k8s.io/client-go/kubernetes" + "k8s.io/client-go/rest" + + // Needed to run tests in GKE + _ "k8s.io/client-go/plugin/pkg/client/auth/gcp" + + // From https://github.com/kubernetes/client-go/blob/53c7adfd0294caa142d961e1f780f74081d5b15f/examples/out-of-cluster-client-configuration/main.go#L31 + _ "k8s.io/client-go/plugin/pkg/client/auth/gcp" +) + +var _ = Describe("GroupEmitter", func() { + if os.Getenv("RUN_KUBE_TESTS") != "1" { + log.Printf("This test creates kubernetes resources and is disabled by default. To enable, set RUN_KUBE_TESTS=1 in your env.") + return + } + var ( + namespace1 string + namespace2 string + name1, name2 = "angela" + helpers.RandString(3), "bob" + helpers.RandString(3) + cfg *rest.Config + kube kubernetes.Interface + emitter TestingEmitter + mockResourceClient testing_solo_io.MockResourceClient + fakeResourceClient testing_solo_io.FakeResourceClient + anotherMockResourceClient testing_solo_io.AnotherMockResourceClient + clusterResourceClient testing_solo_io.ClusterResourceClient + mockCustomTypeClient MockCustomTypeClient + podClient github_com_solo_io_solo_kit_pkg_api_v1_resources_common_kubernetes.PodClient + ) + + BeforeEach(func() { + namespace1 = helpers.RandString(8) + namespace2 = helpers.RandString(8) + kube = helpers.MustKubeClient() + err := kubeutils.CreateNamespacesInParallel(kube, namespace1, namespace2) + Expect(err).NotTo(HaveOccurred()) + cfg, err = kubeutils.GetConfig("", "") + Expect(err).NotTo(HaveOccurred()) + // MockResource Constructor + mockResourceClientFactory := &factory.KubeResourceClientFactory{ + Crd: testing_solo_io.MockResourceCrd, + 
Cfg: cfg, + SharedCache: kuberc.NewKubeCache(context.TODO()), + } + + mockResourceClient, err = testing_solo_io.NewMockResourceClient(mockResourceClientFactory) + Expect(err).NotTo(HaveOccurred()) + // FakeResource Constructor + fakeResourceClientFactory := &factory.KubeResourceClientFactory{ + Crd: testing_solo_io.FakeResourceCrd, + Cfg: cfg, + SharedCache: kuberc.NewKubeCache(context.TODO()), + } + + fakeResourceClient, err = testing_solo_io.NewFakeResourceClient(fakeResourceClientFactory) + Expect(err).NotTo(HaveOccurred()) + // AnotherMockResource Constructor + anotherMockResourceClientFactory := &factory.KubeResourceClientFactory{ + Crd: testing_solo_io.AnotherMockResourceCrd, + Cfg: cfg, + SharedCache: kuberc.NewKubeCache(context.TODO()), + } + + anotherMockResourceClient, err = testing_solo_io.NewAnotherMockResourceClient(anotherMockResourceClientFactory) + Expect(err).NotTo(HaveOccurred()) + // ClusterResource Constructor + clusterResourceClientFactory := &factory.KubeResourceClientFactory{ + Crd: testing_solo_io.ClusterResourceCrd, + Cfg: cfg, + SharedCache: kuberc.NewKubeCache(context.TODO()), + } + + clusterResourceClient, err = testing_solo_io.NewClusterResourceClient(clusterResourceClientFactory) + Expect(err).NotTo(HaveOccurred()) + // MockCustomType Constructor + mockCustomTypeClientFactory := &factory.MemoryResourceClientFactory{ + Cache: memory.NewInMemoryResourceCache(), + } + + mockCustomTypeClient, err = NewMockCustomTypeClient(mockCustomTypeClientFactory) + Expect(err).NotTo(HaveOccurred()) + // Pod Constructor + podClientFactory := &factory.MemoryResourceClientFactory{ + Cache: memory.NewInMemoryResourceCache(), + } + + podClient, err = github_com_solo_io_solo_kit_pkg_api_v1_resources_common_kubernetes.NewPodClient(podClientFactory) + Expect(err).NotTo(HaveOccurred()) + emitter = NewTestingEmitter(mockResourceClient, fakeResourceClient, anotherMockResourceClient, clusterResourceClient, mockCustomTypeClient, podClient) + }) + AfterEach(func() { 
+ err := kubeutils.DeleteNamespacesInParallelBlocking(kube, namespace1, namespace2) + Expect(err).NotTo(HaveOccurred()) + clusterResourceClient.Delete(name1, clients.DeleteOpts{}) + clusterResourceClient.Delete(name2, clients.DeleteOpts{}) + }) + It("tracks snapshots on changes to any resource", func() { + ctx := context.Background() + err := emitter.Register() + Expect(err).NotTo(HaveOccurred()) + + snapshots, errs, err := emitter.Snapshots([]string{namespace1, namespace2}, clients.WatchOpts{ + Ctx: ctx, + RefreshRate: time.Second, + }) + Expect(err).NotTo(HaveOccurred()) + + var snap *TestingSnapshot + + /* + MockResource + */ + + assertSnapshotMocks := func(expectMocks testing_solo_io.MockResourceList, unexpectMocks testing_solo_io.MockResourceList) { + drain: + for { + select { + case snap = <-snapshots: + for _, expected := range expectMocks { + if _, err := snap.Mocks.Find(expected.GetMetadata().Ref().Strings()); err != nil { + continue drain + } + } + for _, unexpected := range unexpectMocks { + if _, err := snap.Mocks.Find(unexpected.GetMetadata().Ref().Strings()); err == nil { + continue drain + } + } + break drain + case err := <-errs: + Expect(err).NotTo(HaveOccurred()) + case <-time.After(time.Second * 10): + nsList1, _ := mockResourceClient.List(namespace1, clients.ListOpts{}) + nsList2, _ := mockResourceClient.List(namespace2, clients.ListOpts{}) + combined := append(nsList1, nsList2...) + Fail("expected final snapshot before 10 seconds. 
expected " + log.Sprintf("%v", combined)) + } + } + } + mockResource1a, err := mockResourceClient.Write(testing_solo_io.NewMockResource(namespace1, name1), clients.WriteOpts{Ctx: ctx}) + Expect(err).NotTo(HaveOccurred()) + mockResource1b, err := mockResourceClient.Write(testing_solo_io.NewMockResource(namespace2, name1), clients.WriteOpts{Ctx: ctx}) + Expect(err).NotTo(HaveOccurred()) + + assertSnapshotMocks(testing_solo_io.MockResourceList{mockResource1a, mockResource1b}, nil) + mockResource2a, err := mockResourceClient.Write(testing_solo_io.NewMockResource(namespace1, name2), clients.WriteOpts{Ctx: ctx}) + Expect(err).NotTo(HaveOccurred()) + mockResource2b, err := mockResourceClient.Write(testing_solo_io.NewMockResource(namespace2, name2), clients.WriteOpts{Ctx: ctx}) + Expect(err).NotTo(HaveOccurred()) + + assertSnapshotMocks(testing_solo_io.MockResourceList{mockResource1a, mockResource1b, mockResource2a, mockResource2b}, nil) + + err = mockResourceClient.Delete(mockResource2a.GetMetadata().Namespace, mockResource2a.GetMetadata().Name, clients.DeleteOpts{Ctx: ctx}) + Expect(err).NotTo(HaveOccurred()) + err = mockResourceClient.Delete(mockResource2b.GetMetadata().Namespace, mockResource2b.GetMetadata().Name, clients.DeleteOpts{Ctx: ctx}) + Expect(err).NotTo(HaveOccurred()) + + assertSnapshotMocks(testing_solo_io.MockResourceList{mockResource1a, mockResource1b}, testing_solo_io.MockResourceList{mockResource2a, mockResource2b}) + + err = mockResourceClient.Delete(mockResource1a.GetMetadata().Namespace, mockResource1a.GetMetadata().Name, clients.DeleteOpts{Ctx: ctx}) + Expect(err).NotTo(HaveOccurred()) + err = mockResourceClient.Delete(mockResource1b.GetMetadata().Namespace, mockResource1b.GetMetadata().Name, clients.DeleteOpts{Ctx: ctx}) + Expect(err).NotTo(HaveOccurred()) + + assertSnapshotMocks(nil, testing_solo_io.MockResourceList{mockResource1a, mockResource1b, mockResource2a, mockResource2b}) + + /* + FakeResource + */ + + assertSnapshotFakes := 
func(expectFakes testing_solo_io.FakeResourceList, unexpectFakes testing_solo_io.FakeResourceList) { + drain: + for { + select { + case snap = <-snapshots: + for _, expected := range expectFakes { + if _, err := snap.Fakes.Find(expected.GetMetadata().Ref().Strings()); err != nil { + continue drain + } + } + for _, unexpected := range unexpectFakes { + if _, err := snap.Fakes.Find(unexpected.GetMetadata().Ref().Strings()); err == nil { + continue drain + } + } + break drain + case err := <-errs: + Expect(err).NotTo(HaveOccurred()) + case <-time.After(time.Second * 10): + nsList1, _ := fakeResourceClient.List(namespace1, clients.ListOpts{}) + nsList2, _ := fakeResourceClient.List(namespace2, clients.ListOpts{}) + combined := append(nsList1, nsList2...) + Fail("expected final snapshot before 10 seconds. expected " + log.Sprintf("%v", combined)) + } + } + } + fakeResource1a, err := fakeResourceClient.Write(testing_solo_io.NewFakeResource(namespace1, name1), clients.WriteOpts{Ctx: ctx}) + Expect(err).NotTo(HaveOccurred()) + fakeResource1b, err := fakeResourceClient.Write(testing_solo_io.NewFakeResource(namespace2, name1), clients.WriteOpts{Ctx: ctx}) + Expect(err).NotTo(HaveOccurred()) + + assertSnapshotFakes(testing_solo_io.FakeResourceList{fakeResource1a, fakeResource1b}, nil) + fakeResource2a, err := fakeResourceClient.Write(testing_solo_io.NewFakeResource(namespace1, name2), clients.WriteOpts{Ctx: ctx}) + Expect(err).NotTo(HaveOccurred()) + fakeResource2b, err := fakeResourceClient.Write(testing_solo_io.NewFakeResource(namespace2, name2), clients.WriteOpts{Ctx: ctx}) + Expect(err).NotTo(HaveOccurred()) + + assertSnapshotFakes(testing_solo_io.FakeResourceList{fakeResource1a, fakeResource1b, fakeResource2a, fakeResource2b}, nil) + + err = fakeResourceClient.Delete(fakeResource2a.GetMetadata().Namespace, fakeResource2a.GetMetadata().Name, clients.DeleteOpts{Ctx: ctx}) + Expect(err).NotTo(HaveOccurred()) + err = 
fakeResourceClient.Delete(fakeResource2b.GetMetadata().Namespace, fakeResource2b.GetMetadata().Name, clients.DeleteOpts{Ctx: ctx}) + Expect(err).NotTo(HaveOccurred()) + + assertSnapshotFakes(testing_solo_io.FakeResourceList{fakeResource1a, fakeResource1b}, testing_solo_io.FakeResourceList{fakeResource2a, fakeResource2b}) + + err = fakeResourceClient.Delete(fakeResource1a.GetMetadata().Namespace, fakeResource1a.GetMetadata().Name, clients.DeleteOpts{Ctx: ctx}) + Expect(err).NotTo(HaveOccurred()) + err = fakeResourceClient.Delete(fakeResource1b.GetMetadata().Namespace, fakeResource1b.GetMetadata().Name, clients.DeleteOpts{Ctx: ctx}) + Expect(err).NotTo(HaveOccurred()) + + assertSnapshotFakes(nil, testing_solo_io.FakeResourceList{fakeResource1a, fakeResource1b, fakeResource2a, fakeResource2b}) + + /* + AnotherMockResource + */ + + assertSnapshotAnothermockresources := func(expectAnothermockresources testing_solo_io.AnotherMockResourceList, unexpectAnothermockresources testing_solo_io.AnotherMockResourceList) { + drain: + for { + select { + case snap = <-snapshots: + for _, expected := range expectAnothermockresources { + if _, err := snap.Anothermockresources.Find(expected.GetMetadata().Ref().Strings()); err != nil { + continue drain + } + } + for _, unexpected := range unexpectAnothermockresources { + if _, err := snap.Anothermockresources.Find(unexpected.GetMetadata().Ref().Strings()); err == nil { + continue drain + } + } + break drain + case err := <-errs: + Expect(err).NotTo(HaveOccurred()) + case <-time.After(time.Second * 10): + nsList1, _ := anotherMockResourceClient.List(namespace1, clients.ListOpts{}) + nsList2, _ := anotherMockResourceClient.List(namespace2, clients.ListOpts{}) + combined := append(nsList1, nsList2...) + Fail("expected final snapshot before 10 seconds. 
expected " + log.Sprintf("%v", combined)) + } + } + } + anotherMockResource1a, err := anotherMockResourceClient.Write(testing_solo_io.NewAnotherMockResource(namespace1, name1), clients.WriteOpts{Ctx: ctx}) + Expect(err).NotTo(HaveOccurred()) + anotherMockResource1b, err := anotherMockResourceClient.Write(testing_solo_io.NewAnotherMockResource(namespace2, name1), clients.WriteOpts{Ctx: ctx}) + Expect(err).NotTo(HaveOccurred()) + + assertSnapshotAnothermockresources(testing_solo_io.AnotherMockResourceList{anotherMockResource1a, anotherMockResource1b}, nil) + anotherMockResource2a, err := anotherMockResourceClient.Write(testing_solo_io.NewAnotherMockResource(namespace1, name2), clients.WriteOpts{Ctx: ctx}) + Expect(err).NotTo(HaveOccurred()) + anotherMockResource2b, err := anotherMockResourceClient.Write(testing_solo_io.NewAnotherMockResource(namespace2, name2), clients.WriteOpts{Ctx: ctx}) + Expect(err).NotTo(HaveOccurred()) + + assertSnapshotAnothermockresources(testing_solo_io.AnotherMockResourceList{anotherMockResource1a, anotherMockResource1b, anotherMockResource2a, anotherMockResource2b}, nil) + + err = anotherMockResourceClient.Delete(anotherMockResource2a.GetMetadata().Namespace, anotherMockResource2a.GetMetadata().Name, clients.DeleteOpts{Ctx: ctx}) + Expect(err).NotTo(HaveOccurred()) + err = anotherMockResourceClient.Delete(anotherMockResource2b.GetMetadata().Namespace, anotherMockResource2b.GetMetadata().Name, clients.DeleteOpts{Ctx: ctx}) + Expect(err).NotTo(HaveOccurred()) + + assertSnapshotAnothermockresources(testing_solo_io.AnotherMockResourceList{anotherMockResource1a, anotherMockResource1b}, testing_solo_io.AnotherMockResourceList{anotherMockResource2a, anotherMockResource2b}) + + err = anotherMockResourceClient.Delete(anotherMockResource1a.GetMetadata().Namespace, anotherMockResource1a.GetMetadata().Name, clients.DeleteOpts{Ctx: ctx}) + Expect(err).NotTo(HaveOccurred()) + err = 
anotherMockResourceClient.Delete(anotherMockResource1b.GetMetadata().Namespace, anotherMockResource1b.GetMetadata().Name, clients.DeleteOpts{Ctx: ctx}) + Expect(err).NotTo(HaveOccurred()) + + assertSnapshotAnothermockresources(nil, testing_solo_io.AnotherMockResourceList{anotherMockResource1a, anotherMockResource1b, anotherMockResource2a, anotherMockResource2b}) + + /* + ClusterResource + */ + + assertSnapshotClusterresources := func(expectClusterresources testing_solo_io.ClusterResourceList, unexpectClusterresources testing_solo_io.ClusterResourceList) { + drain: + for { + select { + case snap = <-snapshots: + for _, expected := range expectClusterresources { + if _, err := snap.Clusterresources.Find(expected.GetMetadata().Ref().Strings()); err != nil { + continue drain + } + } + for _, unexpected := range unexpectClusterresources { + if _, err := snap.Clusterresources.Find(unexpected.GetMetadata().Ref().Strings()); err == nil { + continue drain + } + } + break drain + case err := <-errs: + Expect(err).NotTo(HaveOccurred()) + case <-time.After(time.Second * 10): + combined, _ := clusterResourceClient.List(clients.ListOpts{}) + Fail("expected final snapshot before 10 seconds. 
expected " + log.Sprintf("%v", combined)) + } + } + } + clusterResource1a, err := clusterResourceClient.Write(testing_solo_io.NewClusterResource(namespace1, name1), clients.WriteOpts{Ctx: ctx}) + Expect(err).NotTo(HaveOccurred()) + + assertSnapshotClusterresources(testing_solo_io.ClusterResourceList{clusterResource1a}, nil) + clusterResource2a, err := clusterResourceClient.Write(testing_solo_io.NewClusterResource(namespace1, name2), clients.WriteOpts{Ctx: ctx}) + Expect(err).NotTo(HaveOccurred()) + + assertSnapshotClusterresources(testing_solo_io.ClusterResourceList{clusterResource1a, clusterResource2a}, nil) + + err = clusterResourceClient.Delete(clusterResource2a.GetMetadata().Name, clients.DeleteOpts{Ctx: ctx}) + Expect(err).NotTo(HaveOccurred()) + + assertSnapshotClusterresources(testing_solo_io.ClusterResourceList{clusterResource1a}, testing_solo_io.ClusterResourceList{clusterResource2a}) + + err = clusterResourceClient.Delete(clusterResource1a.GetMetadata().Name, clients.DeleteOpts{Ctx: ctx}) + Expect(err).NotTo(HaveOccurred()) + + assertSnapshotClusterresources(nil, testing_solo_io.ClusterResourceList{clusterResource1a, clusterResource2a}) + + /* + MockCustomType + */ + + assertSnapshotmcts := func(expectmcts MockCustomTypeList, unexpectmcts MockCustomTypeList) { + drain: + for { + select { + case snap = <-snapshots: + for _, expected := range expectmcts { + if _, err := snap.Mcts.Find(expected.GetMetadata().Ref().Strings()); err != nil { + continue drain + } + } + for _, unexpected := range unexpectmcts { + if _, err := snap.Mcts.Find(unexpected.GetMetadata().Ref().Strings()); err == nil { + continue drain + } + } + break drain + case err := <-errs: + Expect(err).NotTo(HaveOccurred()) + case <-time.After(time.Second * 10): + nsList1, _ := mockCustomTypeClient.List(namespace1, clients.ListOpts{}) + nsList2, _ := mockCustomTypeClient.List(namespace2, clients.ListOpts{}) + combined := append(nsList1, nsList2...) 
+ Fail("expected final snapshot before 10 seconds. expected " + log.Sprintf("%v", combined)) + } + } + } + mockCustomType1a, err := mockCustomTypeClient.Write(NewMockCustomType(namespace1, name1), clients.WriteOpts{Ctx: ctx}) + Expect(err).NotTo(HaveOccurred()) + mockCustomType1b, err := mockCustomTypeClient.Write(NewMockCustomType(namespace2, name1), clients.WriteOpts{Ctx: ctx}) + Expect(err).NotTo(HaveOccurred()) + + assertSnapshotmcts(MockCustomTypeList{mockCustomType1a, mockCustomType1b}, nil) + mockCustomType2a, err := mockCustomTypeClient.Write(NewMockCustomType(namespace1, name2), clients.WriteOpts{Ctx: ctx}) + Expect(err).NotTo(HaveOccurred()) + mockCustomType2b, err := mockCustomTypeClient.Write(NewMockCustomType(namespace2, name2), clients.WriteOpts{Ctx: ctx}) + Expect(err).NotTo(HaveOccurred()) + + assertSnapshotmcts(MockCustomTypeList{mockCustomType1a, mockCustomType1b, mockCustomType2a, mockCustomType2b}, nil) + + err = mockCustomTypeClient.Delete(mockCustomType2a.GetMetadata().Namespace, mockCustomType2a.GetMetadata().Name, clients.DeleteOpts{Ctx: ctx}) + Expect(err).NotTo(HaveOccurred()) + err = mockCustomTypeClient.Delete(mockCustomType2b.GetMetadata().Namespace, mockCustomType2b.GetMetadata().Name, clients.DeleteOpts{Ctx: ctx}) + Expect(err).NotTo(HaveOccurred()) + + assertSnapshotmcts(MockCustomTypeList{mockCustomType1a, mockCustomType1b}, MockCustomTypeList{mockCustomType2a, mockCustomType2b}) + + err = mockCustomTypeClient.Delete(mockCustomType1a.GetMetadata().Namespace, mockCustomType1a.GetMetadata().Name, clients.DeleteOpts{Ctx: ctx}) + Expect(err).NotTo(HaveOccurred()) + err = mockCustomTypeClient.Delete(mockCustomType1b.GetMetadata().Namespace, mockCustomType1b.GetMetadata().Name, clients.DeleteOpts{Ctx: ctx}) + Expect(err).NotTo(HaveOccurred()) + + assertSnapshotmcts(nil, MockCustomTypeList{mockCustomType1a, mockCustomType1b, mockCustomType2a, mockCustomType2b}) + + /* + Pod + */ + + assertSnapshotpods := func(expectpods 
github_com_solo_io_solo_kit_pkg_api_v1_resources_common_kubernetes.PodList, unexpectpods github_com_solo_io_solo_kit_pkg_api_v1_resources_common_kubernetes.PodList) { + drain: + for { + select { + case snap = <-snapshots: + for _, expected := range expectpods { + if _, err := snap.Pods.Find(expected.GetMetadata().Ref().Strings()); err != nil { + continue drain + } + } + for _, unexpected := range unexpectpods { + if _, err := snap.Pods.Find(unexpected.GetMetadata().Ref().Strings()); err == nil { + continue drain + } + } + break drain + case err := <-errs: + Expect(err).NotTo(HaveOccurred()) + case <-time.After(time.Second * 10): + nsList1, _ := podClient.List(namespace1, clients.ListOpts{}) + nsList2, _ := podClient.List(namespace2, clients.ListOpts{}) + combined := append(nsList1, nsList2...) + Fail("expected final snapshot before 10 seconds. expected " + log.Sprintf("%v", combined)) + } + } + } + pod1a, err := podClient.Write(github_com_solo_io_solo_kit_pkg_api_v1_resources_common_kubernetes.NewPod(namespace1, name1), clients.WriteOpts{Ctx: ctx}) + Expect(err).NotTo(HaveOccurred()) + pod1b, err := podClient.Write(github_com_solo_io_solo_kit_pkg_api_v1_resources_common_kubernetes.NewPod(namespace2, name1), clients.WriteOpts{Ctx: ctx}) + Expect(err).NotTo(HaveOccurred()) + + assertSnapshotpods(github_com_solo_io_solo_kit_pkg_api_v1_resources_common_kubernetes.PodList{pod1a, pod1b}, nil) + pod2a, err := podClient.Write(github_com_solo_io_solo_kit_pkg_api_v1_resources_common_kubernetes.NewPod(namespace1, name2), clients.WriteOpts{Ctx: ctx}) + Expect(err).NotTo(HaveOccurred()) + pod2b, err := podClient.Write(github_com_solo_io_solo_kit_pkg_api_v1_resources_common_kubernetes.NewPod(namespace2, name2), clients.WriteOpts{Ctx: ctx}) + Expect(err).NotTo(HaveOccurred()) + + assertSnapshotpods(github_com_solo_io_solo_kit_pkg_api_v1_resources_common_kubernetes.PodList{pod1a, pod1b, pod2a, pod2b}, nil) + + err = podClient.Delete(pod2a.GetMetadata().Namespace, 
pod2a.GetMetadata().Name, clients.DeleteOpts{Ctx: ctx}) + Expect(err).NotTo(HaveOccurred()) + err = podClient.Delete(pod2b.GetMetadata().Namespace, pod2b.GetMetadata().Name, clients.DeleteOpts{Ctx: ctx}) + Expect(err).NotTo(HaveOccurred()) + + assertSnapshotpods(github_com_solo_io_solo_kit_pkg_api_v1_resources_common_kubernetes.PodList{pod1a, pod1b}, github_com_solo_io_solo_kit_pkg_api_v1_resources_common_kubernetes.PodList{pod2a, pod2b}) + + err = podClient.Delete(pod1a.GetMetadata().Namespace, pod1a.GetMetadata().Name, clients.DeleteOpts{Ctx: ctx}) + Expect(err).NotTo(HaveOccurred()) + err = podClient.Delete(pod1b.GetMetadata().Namespace, pod1b.GetMetadata().Name, clients.DeleteOpts{Ctx: ctx}) + Expect(err).NotTo(HaveOccurred()) + + assertSnapshotpods(nil, github_com_solo_io_solo_kit_pkg_api_v1_resources_common_kubernetes.PodList{pod1a, pod1b, pod2a, pod2b}) + }) + It("tracks snapshots on changes to any resource using AllNamespace", func() { + ctx := context.Background() + err := emitter.Register() + Expect(err).NotTo(HaveOccurred()) + + snapshots, errs, err := emitter.Snapshots([]string{""}, clients.WatchOpts{ + Ctx: ctx, + RefreshRate: time.Second, + }) + Expect(err).NotTo(HaveOccurred()) + + var snap *TestingSnapshot + + /* + MockResource + */ + + assertSnapshotMocks := func(expectMocks testing_solo_io.MockResourceList, unexpectMocks testing_solo_io.MockResourceList) { + drain: + for { + select { + case snap = <-snapshots: + for _, expected := range expectMocks { + if _, err := snap.Mocks.Find(expected.GetMetadata().Ref().Strings()); err != nil { + continue drain + } + } + for _, unexpected := range unexpectMocks { + if _, err := snap.Mocks.Find(unexpected.GetMetadata().Ref().Strings()); err == nil { + continue drain + } + } + break drain + case err := <-errs: + Expect(err).NotTo(HaveOccurred()) + case <-time.After(time.Second * 10): + nsList1, _ := mockResourceClient.List(namespace1, clients.ListOpts{}) + nsList2, _ := mockResourceClient.List(namespace2, 
clients.ListOpts{}) + combined := append(nsList1, nsList2...) + Fail("expected final snapshot before 10 seconds. expected " + log.Sprintf("%v", combined)) + } + } + } + mockResource1a, err := mockResourceClient.Write(testing_solo_io.NewMockResource(namespace1, name1), clients.WriteOpts{Ctx: ctx}) + Expect(err).NotTo(HaveOccurred()) + mockResource1b, err := mockResourceClient.Write(testing_solo_io.NewMockResource(namespace2, name1), clients.WriteOpts{Ctx: ctx}) + Expect(err).NotTo(HaveOccurred()) + + assertSnapshotMocks(testing_solo_io.MockResourceList{mockResource1a, mockResource1b}, nil) + mockResource2a, err := mockResourceClient.Write(testing_solo_io.NewMockResource(namespace1, name2), clients.WriteOpts{Ctx: ctx}) + Expect(err).NotTo(HaveOccurred()) + mockResource2b, err := mockResourceClient.Write(testing_solo_io.NewMockResource(namespace2, name2), clients.WriteOpts{Ctx: ctx}) + Expect(err).NotTo(HaveOccurred()) + + assertSnapshotMocks(testing_solo_io.MockResourceList{mockResource1a, mockResource1b, mockResource2a, mockResource2b}, nil) + + err = mockResourceClient.Delete(mockResource2a.GetMetadata().Namespace, mockResource2a.GetMetadata().Name, clients.DeleteOpts{Ctx: ctx}) + Expect(err).NotTo(HaveOccurred()) + err = mockResourceClient.Delete(mockResource2b.GetMetadata().Namespace, mockResource2b.GetMetadata().Name, clients.DeleteOpts{Ctx: ctx}) + Expect(err).NotTo(HaveOccurred()) + + assertSnapshotMocks(testing_solo_io.MockResourceList{mockResource1a, mockResource1b}, testing_solo_io.MockResourceList{mockResource2a, mockResource2b}) + + err = mockResourceClient.Delete(mockResource1a.GetMetadata().Namespace, mockResource1a.GetMetadata().Name, clients.DeleteOpts{Ctx: ctx}) + Expect(err).NotTo(HaveOccurred()) + err = mockResourceClient.Delete(mockResource1b.GetMetadata().Namespace, mockResource1b.GetMetadata().Name, clients.DeleteOpts{Ctx: ctx}) + Expect(err).NotTo(HaveOccurred()) + + assertSnapshotMocks(nil, testing_solo_io.MockResourceList{mockResource1a, 
mockResource1b, mockResource2a, mockResource2b}) + + /* + FakeResource + */ + + assertSnapshotFakes := func(expectFakes testing_solo_io.FakeResourceList, unexpectFakes testing_solo_io.FakeResourceList) { + drain: + for { + select { + case snap = <-snapshots: + for _, expected := range expectFakes { + if _, err := snap.Fakes.Find(expected.GetMetadata().Ref().Strings()); err != nil { + continue drain + } + } + for _, unexpected := range unexpectFakes { + if _, err := snap.Fakes.Find(unexpected.GetMetadata().Ref().Strings()); err == nil { + continue drain + } + } + break drain + case err := <-errs: + Expect(err).NotTo(HaveOccurred()) + case <-time.After(time.Second * 10): + nsList1, _ := fakeResourceClient.List(namespace1, clients.ListOpts{}) + nsList2, _ := fakeResourceClient.List(namespace2, clients.ListOpts{}) + combined := append(nsList1, nsList2...) + Fail("expected final snapshot before 10 seconds. expected " + log.Sprintf("%v", combined)) + } + } + } + fakeResource1a, err := fakeResourceClient.Write(testing_solo_io.NewFakeResource(namespace1, name1), clients.WriteOpts{Ctx: ctx}) + Expect(err).NotTo(HaveOccurred()) + fakeResource1b, err := fakeResourceClient.Write(testing_solo_io.NewFakeResource(namespace2, name1), clients.WriteOpts{Ctx: ctx}) + Expect(err).NotTo(HaveOccurred()) + + assertSnapshotFakes(testing_solo_io.FakeResourceList{fakeResource1a, fakeResource1b}, nil) + fakeResource2a, err := fakeResourceClient.Write(testing_solo_io.NewFakeResource(namespace1, name2), clients.WriteOpts{Ctx: ctx}) + Expect(err).NotTo(HaveOccurred()) + fakeResource2b, err := fakeResourceClient.Write(testing_solo_io.NewFakeResource(namespace2, name2), clients.WriteOpts{Ctx: ctx}) + Expect(err).NotTo(HaveOccurred()) + + assertSnapshotFakes(testing_solo_io.FakeResourceList{fakeResource1a, fakeResource1b, fakeResource2a, fakeResource2b}, nil) + + err = fakeResourceClient.Delete(fakeResource2a.GetMetadata().Namespace, fakeResource2a.GetMetadata().Name, clients.DeleteOpts{Ctx: ctx}) 
+ Expect(err).NotTo(HaveOccurred()) + err = fakeResourceClient.Delete(fakeResource2b.GetMetadata().Namespace, fakeResource2b.GetMetadata().Name, clients.DeleteOpts{Ctx: ctx}) + Expect(err).NotTo(HaveOccurred()) + + assertSnapshotFakes(testing_solo_io.FakeResourceList{fakeResource1a, fakeResource1b}, testing_solo_io.FakeResourceList{fakeResource2a, fakeResource2b}) + + err = fakeResourceClient.Delete(fakeResource1a.GetMetadata().Namespace, fakeResource1a.GetMetadata().Name, clients.DeleteOpts{Ctx: ctx}) + Expect(err).NotTo(HaveOccurred()) + err = fakeResourceClient.Delete(fakeResource1b.GetMetadata().Namespace, fakeResource1b.GetMetadata().Name, clients.DeleteOpts{Ctx: ctx}) + Expect(err).NotTo(HaveOccurred()) + + assertSnapshotFakes(nil, testing_solo_io.FakeResourceList{fakeResource1a, fakeResource1b, fakeResource2a, fakeResource2b}) + + /* + AnotherMockResource + */ + + assertSnapshotAnothermockresources := func(expectAnothermockresources testing_solo_io.AnotherMockResourceList, unexpectAnothermockresources testing_solo_io.AnotherMockResourceList) { + drain: + for { + select { + case snap = <-snapshots: + for _, expected := range expectAnothermockresources { + if _, err := snap.Anothermockresources.Find(expected.GetMetadata().Ref().Strings()); err != nil { + continue drain + } + } + for _, unexpected := range unexpectAnothermockresources { + if _, err := snap.Anothermockresources.Find(unexpected.GetMetadata().Ref().Strings()); err == nil { + continue drain + } + } + break drain + case err := <-errs: + Expect(err).NotTo(HaveOccurred()) + case <-time.After(time.Second * 10): + nsList1, _ := anotherMockResourceClient.List(namespace1, clients.ListOpts{}) + nsList2, _ := anotherMockResourceClient.List(namespace2, clients.ListOpts{}) + combined := append(nsList1, nsList2...) + Fail("expected final snapshot before 10 seconds. 
expected " + log.Sprintf("%v", combined)) + } + } + } + anotherMockResource1a, err := anotherMockResourceClient.Write(testing_solo_io.NewAnotherMockResource(namespace1, name1), clients.WriteOpts{Ctx: ctx}) + Expect(err).NotTo(HaveOccurred()) + anotherMockResource1b, err := anotherMockResourceClient.Write(testing_solo_io.NewAnotherMockResource(namespace2, name1), clients.WriteOpts{Ctx: ctx}) + Expect(err).NotTo(HaveOccurred()) + + assertSnapshotAnothermockresources(testing_solo_io.AnotherMockResourceList{anotherMockResource1a, anotherMockResource1b}, nil) + anotherMockResource2a, err := anotherMockResourceClient.Write(testing_solo_io.NewAnotherMockResource(namespace1, name2), clients.WriteOpts{Ctx: ctx}) + Expect(err).NotTo(HaveOccurred()) + anotherMockResource2b, err := anotherMockResourceClient.Write(testing_solo_io.NewAnotherMockResource(namespace2, name2), clients.WriteOpts{Ctx: ctx}) + Expect(err).NotTo(HaveOccurred()) + + assertSnapshotAnothermockresources(testing_solo_io.AnotherMockResourceList{anotherMockResource1a, anotherMockResource1b, anotherMockResource2a, anotherMockResource2b}, nil) + + err = anotherMockResourceClient.Delete(anotherMockResource2a.GetMetadata().Namespace, anotherMockResource2a.GetMetadata().Name, clients.DeleteOpts{Ctx: ctx}) + Expect(err).NotTo(HaveOccurred()) + err = anotherMockResourceClient.Delete(anotherMockResource2b.GetMetadata().Namespace, anotherMockResource2b.GetMetadata().Name, clients.DeleteOpts{Ctx: ctx}) + Expect(err).NotTo(HaveOccurred()) + + assertSnapshotAnothermockresources(testing_solo_io.AnotherMockResourceList{anotherMockResource1a, anotherMockResource1b}, testing_solo_io.AnotherMockResourceList{anotherMockResource2a, anotherMockResource2b}) + + err = anotherMockResourceClient.Delete(anotherMockResource1a.GetMetadata().Namespace, anotherMockResource1a.GetMetadata().Name, clients.DeleteOpts{Ctx: ctx}) + Expect(err).NotTo(HaveOccurred()) + err = 
anotherMockResourceClient.Delete(anotherMockResource1b.GetMetadata().Namespace, anotherMockResource1b.GetMetadata().Name, clients.DeleteOpts{Ctx: ctx}) + Expect(err).NotTo(HaveOccurred()) + + assertSnapshotAnothermockresources(nil, testing_solo_io.AnotherMockResourceList{anotherMockResource1a, anotherMockResource1b, anotherMockResource2a, anotherMockResource2b}) + + /* + ClusterResource + */ + + assertSnapshotClusterresources := func(expectClusterresources testing_solo_io.ClusterResourceList, unexpectClusterresources testing_solo_io.ClusterResourceList) { + drain: + for { + select { + case snap = <-snapshots: + for _, expected := range expectClusterresources { + if _, err := snap.Clusterresources.Find(expected.GetMetadata().Ref().Strings()); err != nil { + continue drain + } + } + for _, unexpected := range unexpectClusterresources { + if _, err := snap.Clusterresources.Find(unexpected.GetMetadata().Ref().Strings()); err == nil { + continue drain + } + } + break drain + case err := <-errs: + Expect(err).NotTo(HaveOccurred()) + case <-time.After(time.Second * 10): + combined, _ := clusterResourceClient.List(clients.ListOpts{}) + Fail("expected final snapshot before 10 seconds. 
expected " + log.Sprintf("%v", combined)) + } + } + } + clusterResource1a, err := clusterResourceClient.Write(testing_solo_io.NewClusterResource(namespace1, name1), clients.WriteOpts{Ctx: ctx}) + Expect(err).NotTo(HaveOccurred()) + + assertSnapshotClusterresources(testing_solo_io.ClusterResourceList{clusterResource1a}, nil) + clusterResource2a, err := clusterResourceClient.Write(testing_solo_io.NewClusterResource(namespace1, name2), clients.WriteOpts{Ctx: ctx}) + Expect(err).NotTo(HaveOccurred()) + + assertSnapshotClusterresources(testing_solo_io.ClusterResourceList{clusterResource1a, clusterResource2a}, nil) + + err = clusterResourceClient.Delete(clusterResource2a.GetMetadata().Name, clients.DeleteOpts{Ctx: ctx}) + Expect(err).NotTo(HaveOccurred()) + + assertSnapshotClusterresources(testing_solo_io.ClusterResourceList{clusterResource1a}, testing_solo_io.ClusterResourceList{clusterResource2a}) + + err = clusterResourceClient.Delete(clusterResource1a.GetMetadata().Name, clients.DeleteOpts{Ctx: ctx}) + Expect(err).NotTo(HaveOccurred()) + + assertSnapshotClusterresources(nil, testing_solo_io.ClusterResourceList{clusterResource1a, clusterResource2a}) + + /* + MockCustomType + */ + + assertSnapshotmcts := func(expectmcts MockCustomTypeList, unexpectmcts MockCustomTypeList) { + drain: + for { + select { + case snap = <-snapshots: + for _, expected := range expectmcts { + if _, err := snap.Mcts.Find(expected.GetMetadata().Ref().Strings()); err != nil { + continue drain + } + } + for _, unexpected := range unexpectmcts { + if _, err := snap.Mcts.Find(unexpected.GetMetadata().Ref().Strings()); err == nil { + continue drain + } + } + break drain + case err := <-errs: + Expect(err).NotTo(HaveOccurred()) + case <-time.After(time.Second * 10): + nsList1, _ := mockCustomTypeClient.List(namespace1, clients.ListOpts{}) + nsList2, _ := mockCustomTypeClient.List(namespace2, clients.ListOpts{}) + combined := append(nsList1, nsList2...) 
+ Fail("expected final snapshot before 10 seconds. expected " + log.Sprintf("%v", combined)) + } + } + } + mockCustomType1a, err := mockCustomTypeClient.Write(NewMockCustomType(namespace1, name1), clients.WriteOpts{Ctx: ctx}) + Expect(err).NotTo(HaveOccurred()) + mockCustomType1b, err := mockCustomTypeClient.Write(NewMockCustomType(namespace2, name1), clients.WriteOpts{Ctx: ctx}) + Expect(err).NotTo(HaveOccurred()) + + assertSnapshotmcts(MockCustomTypeList{mockCustomType1a, mockCustomType1b}, nil) + mockCustomType2a, err := mockCustomTypeClient.Write(NewMockCustomType(namespace1, name2), clients.WriteOpts{Ctx: ctx}) + Expect(err).NotTo(HaveOccurred()) + mockCustomType2b, err := mockCustomTypeClient.Write(NewMockCustomType(namespace2, name2), clients.WriteOpts{Ctx: ctx}) + Expect(err).NotTo(HaveOccurred()) + + assertSnapshotmcts(MockCustomTypeList{mockCustomType1a, mockCustomType1b, mockCustomType2a, mockCustomType2b}, nil) + + err = mockCustomTypeClient.Delete(mockCustomType2a.GetMetadata().Namespace, mockCustomType2a.GetMetadata().Name, clients.DeleteOpts{Ctx: ctx}) + Expect(err).NotTo(HaveOccurred()) + err = mockCustomTypeClient.Delete(mockCustomType2b.GetMetadata().Namespace, mockCustomType2b.GetMetadata().Name, clients.DeleteOpts{Ctx: ctx}) + Expect(err).NotTo(HaveOccurred()) + + assertSnapshotmcts(MockCustomTypeList{mockCustomType1a, mockCustomType1b}, MockCustomTypeList{mockCustomType2a, mockCustomType2b}) + + err = mockCustomTypeClient.Delete(mockCustomType1a.GetMetadata().Namespace, mockCustomType1a.GetMetadata().Name, clients.DeleteOpts{Ctx: ctx}) + Expect(err).NotTo(HaveOccurred()) + err = mockCustomTypeClient.Delete(mockCustomType1b.GetMetadata().Namespace, mockCustomType1b.GetMetadata().Name, clients.DeleteOpts{Ctx: ctx}) + Expect(err).NotTo(HaveOccurred()) + + assertSnapshotmcts(nil, MockCustomTypeList{mockCustomType1a, mockCustomType1b, mockCustomType2a, mockCustomType2b}) + + /* + Pod + */ + + assertSnapshotpods := func(expectpods 
github_com_solo_io_solo_kit_pkg_api_v1_resources_common_kubernetes.PodList, unexpectpods github_com_solo_io_solo_kit_pkg_api_v1_resources_common_kubernetes.PodList) { + drain: + for { + select { + case snap = <-snapshots: + for _, expected := range expectpods { + if _, err := snap.Pods.Find(expected.GetMetadata().Ref().Strings()); err != nil { + continue drain + } + } + for _, unexpected := range unexpectpods { + if _, err := snap.Pods.Find(unexpected.GetMetadata().Ref().Strings()); err == nil { + continue drain + } + } + break drain + case err := <-errs: + Expect(err).NotTo(HaveOccurred()) + case <-time.After(time.Second * 10): + nsList1, _ := podClient.List(namespace1, clients.ListOpts{}) + nsList2, _ := podClient.List(namespace2, clients.ListOpts{}) + combined := append(nsList1, nsList2...) + Fail("expected final snapshot before 10 seconds. expected " + log.Sprintf("%v", combined)) + } + } + } + pod1a, err := podClient.Write(github_com_solo_io_solo_kit_pkg_api_v1_resources_common_kubernetes.NewPod(namespace1, name1), clients.WriteOpts{Ctx: ctx}) + Expect(err).NotTo(HaveOccurred()) + pod1b, err := podClient.Write(github_com_solo_io_solo_kit_pkg_api_v1_resources_common_kubernetes.NewPod(namespace2, name1), clients.WriteOpts{Ctx: ctx}) + Expect(err).NotTo(HaveOccurred()) + + assertSnapshotpods(github_com_solo_io_solo_kit_pkg_api_v1_resources_common_kubernetes.PodList{pod1a, pod1b}, nil) + pod2a, err := podClient.Write(github_com_solo_io_solo_kit_pkg_api_v1_resources_common_kubernetes.NewPod(namespace1, name2), clients.WriteOpts{Ctx: ctx}) + Expect(err).NotTo(HaveOccurred()) + pod2b, err := podClient.Write(github_com_solo_io_solo_kit_pkg_api_v1_resources_common_kubernetes.NewPod(namespace2, name2), clients.WriteOpts{Ctx: ctx}) + Expect(err).NotTo(HaveOccurred()) + + assertSnapshotpods(github_com_solo_io_solo_kit_pkg_api_v1_resources_common_kubernetes.PodList{pod1a, pod1b, pod2a, pod2b}, nil) + + err = podClient.Delete(pod2a.GetMetadata().Namespace, 
pod2a.GetMetadata().Name, clients.DeleteOpts{Ctx: ctx}) + Expect(err).NotTo(HaveOccurred()) + err = podClient.Delete(pod2b.GetMetadata().Namespace, pod2b.GetMetadata().Name, clients.DeleteOpts{Ctx: ctx}) + Expect(err).NotTo(HaveOccurred()) + + assertSnapshotpods(github_com_solo_io_solo_kit_pkg_api_v1_resources_common_kubernetes.PodList{pod1a, pod1b}, github_com_solo_io_solo_kit_pkg_api_v1_resources_common_kubernetes.PodList{pod2a, pod2b}) + + err = podClient.Delete(pod1a.GetMetadata().Namespace, pod1a.GetMetadata().Name, clients.DeleteOpts{Ctx: ctx}) + Expect(err).NotTo(HaveOccurred()) + err = podClient.Delete(pod1b.GetMetadata().Namespace, pod1b.GetMetadata().Name, clients.DeleteOpts{Ctx: ctx}) + Expect(err).NotTo(HaveOccurred()) + + assertSnapshotpods(nil, github_com_solo_io_solo_kit_pkg_api_v1_resources_common_kubernetes.PodList{pod1a, pod1b, pod2a, pod2b}) + }) +}) diff --git a/test/mockstwo/group/testing_snapshot_simple_emitter.sk.go b/test/mockstwo/group/testing_snapshot_simple_emitter.sk.go new file mode 100644 index 000000000..c4d0dddb5 --- /dev/null +++ b/test/mockstwo/group/testing_snapshot_simple_emitter.sk.go @@ -0,0 +1,113 @@ +// Code generated by solo-kit. DO NOT EDIT. 
+ +package group + +import ( + "context" + "fmt" + "time" + + github_com_solo_io_solo_kit_pkg_api_v1_resources_common_kubernetes "github.com/solo-io/solo-kit/pkg/api/v1/resources/common/kubernetes" + testing_solo_io "github.com/solo-io/solo-kit/test/mocks/v1" + + "go.opencensus.io/stats" + + "github.com/solo-io/go-utils/errutils" + "github.com/solo-io/solo-kit/pkg/api/v1/clients" +) + +type TestingSimpleEmitter interface { + Snapshots(ctx context.Context) (<-chan *TestingSnapshot, <-chan error, error) +} + +func NewTestingSimpleEmitter(aggregatedWatch clients.ResourceWatch) TestingSimpleEmitter { + return NewTestingSimpleEmitterWithEmit(aggregatedWatch, make(chan struct{})) +} + +func NewTestingSimpleEmitterWithEmit(aggregatedWatch clients.ResourceWatch, emit <-chan struct{}) TestingSimpleEmitter { + return &testingSimpleEmitter{ + aggregatedWatch: aggregatedWatch, + forceEmit: emit, + } +} + +type testingSimpleEmitter struct { + forceEmit <-chan struct{} + aggregatedWatch clients.ResourceWatch +} + +func (c *testingSimpleEmitter) Snapshots(ctx context.Context) (<-chan *TestingSnapshot, <-chan error, error) { + snapshots := make(chan *TestingSnapshot) + errs := make(chan error) + + untyped, watchErrs, err := c.aggregatedWatch(ctx) + if err != nil { + return nil, nil, err + } + + go errutils.AggregateErrs(ctx, errs, watchErrs, "testing-emitter") + + go func() { + originalSnapshot := TestingSnapshot{} + currentSnapshot := originalSnapshot.Clone() + timer := time.NewTicker(time.Second * 1) + sync := func() { + if originalSnapshot.Hash() == currentSnapshot.Hash() { + return + } + + stats.Record(ctx, mTestingSnapshotOut.M(1)) + originalSnapshot = currentSnapshot.Clone() + sentSnapshot := currentSnapshot.Clone() + snapshots <- &sentSnapshot + } + + defer func() { + close(snapshots) + close(errs) + }() + + for { + record := func() { stats.Record(ctx, mTestingSnapshotIn.M(1)) } + + select { + case <-timer.C: + sync() + case <-ctx.Done(): + return + case <-c.forceEmit: + 
sentSnapshot := currentSnapshot.Clone() + snapshots <- &sentSnapshot + case untypedList := <-untyped: + record() + + currentSnapshot = TestingSnapshot{} + for _, res := range untypedList { + switch typed := res.(type) { + case *testing_solo_io.MockResource: + currentSnapshot.Mocks = append(currentSnapshot.Mocks, typed) + case *testing_solo_io.FakeResource: + currentSnapshot.Fakes = append(currentSnapshot.Fakes, typed) + case *testing_solo_io.AnotherMockResource: + currentSnapshot.Anothermockresources = append(currentSnapshot.Anothermockresources, typed) + case *testing_solo_io.ClusterResource: + currentSnapshot.Clusterresources = append(currentSnapshot.Clusterresources, typed) + case *MockCustomType: + currentSnapshot.Mcts = append(currentSnapshot.Mcts, typed) + case *github_com_solo_io_solo_kit_pkg_api_v1_resources_common_kubernetes.Pod: + currentSnapshot.Pods = append(currentSnapshot.Pods, typed) + default: + select { + case errs <- fmt.Errorf("TestingSnapshotEmitter "+ + "cannot process resource %v of type %T", res.GetMetadata().Ref(), res): + case <-ctx.Done(): + return + } + } + } + + } + } + }() + return snapshots, errs, nil +} From 1b7e75e193ffe6045d96c78f637e8dd31875fd78 Mon Sep 17 00:00:00 2001 From: Joe Kelley Date: Wed, 7 Aug 2019 15:57:13 -0400 Subject: [PATCH 13/17] wip --- pkg/code-generator/cmd/main.go | 9 +- pkg/code-generator/codegen/conversion.go | 4 +- pkg/code-generator/codegen/resource_group.go | 18 ++ pkg/code-generator/model/project.go | 1 + pkg/code-generator/parser/parser_resource.go | 7 +- pkg/multicluster/group/group_suite_test.go | 15 ++ .../group/kubeconfigs_event_loop_test.go | 6 +- .../group/kubeconfigs_snapshot.sk.go | 4 +- .../group/kubeconfigs_snapshot_emitter.sk.go | 18 +- .../kubeconfigs_snapshot_emitter_test.go | 42 ++-- .../kubeconfigs_snapshot_simple_emitter.sk.go | 4 +- test/mocks/api/solo-kit.json | 17 +- test/mocks/group/group_suite_test.go | 15 ++ test/mocks/group/testing_event_loop_test.go | 22 +- 
test/mocks/group/testing_snapshot.sk.go | 12 +- .../group/testing_snapshot_emitter.sk.go | 60 +++--- .../group/testing_snapshot_emitter_test.go | 202 ++++++++--------- .../testing_snapshot_simple_emitter.sk.go | 12 +- test/mockstwo/api/solo-kit.json | 12 +- test/mockstwo/group/group_suite_test.go | 15 ++ .../mockstwo/group/testing_event_loop_test.go | 23 +- test/mockstwo/group/testing_snapshot.sk.go | 13 +- .../group/testing_snapshot_emitter.sk.go | 61 +++--- .../group/testing_snapshot_emitter_test.go | 203 +++++++++--------- .../testing_snapshot_simple_emitter.sk.go | 13 +- 25 files changed, 449 insertions(+), 359 deletions(-) create mode 100644 pkg/multicluster/group/group_suite_test.go create mode 100644 test/mocks/group/group_suite_test.go create mode 100644 test/mockstwo/group/group_suite_test.go diff --git a/pkg/code-generator/cmd/main.go b/pkg/code-generator/cmd/main.go index 38d395dc9..3065bd777 100644 --- a/pkg/code-generator/cmd/main.go +++ b/pkg/code-generator/cmd/main.go @@ -1,7 +1,6 @@ package cmd import ( - "encoding/json" "fmt" "io/ioutil" "os" @@ -583,13 +582,7 @@ func importCustomResources(imports []string) ([]model.CustomResourceConfig, erro if !strings.HasSuffix(imp, model.ProjectConfigFilename) { imp = filepath.Join(imp, model.ProjectConfigFilename) } - byt, err := ioutil.ReadFile(imp) - if err != nil { - return nil, err - } - - var soloKitProject model.SoloKitProject - err = json.Unmarshal(byt, &soloKitProject) + soloKitProject, err := model.LoadProjectConfig(imp) if err != nil { return nil, err } diff --git a/pkg/code-generator/codegen/conversion.go b/pkg/code-generator/codegen/conversion.go index 37ed4d89c..218cc8ede 100644 --- a/pkg/code-generator/codegen/conversion.go +++ b/pkg/code-generator/codegen/conversion.go @@ -36,12 +36,12 @@ func GenerateConversionFiles(soloKitProject *model.ApiGroup, versions []*model.V // only generate files for the resources in our group, otherwise we import if !v.VersionConfig.IsOurProto(res.Filename) && 
!res.IsCustom { log.Printf("not generating solo-kit "+ - "clients for resource %v.%v, "+ + "converters for resource %v.%v, "+ "resource proto package must match v proto package %v", res.ProtoPackage, res.Name, v.ProtoPackage) continue } else if res.IsCustom && res.CustomResource.Imported { log.Printf("not generating solo-kit "+ - "clients for resource %v.%v, "+ + "converters for resource %v.%v, "+ "custom resources from a different v are not generated", res.GoPackage, res.Name, v.VersionConfig.GoPackage) continue } diff --git a/pkg/code-generator/codegen/resource_group.go b/pkg/code-generator/codegen/resource_group.go index a4ddfe087..11ef0ca5c 100644 --- a/pkg/code-generator/codegen/resource_group.go +++ b/pkg/code-generator/codegen/resource_group.go @@ -63,6 +63,24 @@ func generateFilesForResourceGroup(rg *model.ResourceGroup) (code_generator.File Content: content, }) } + + testSuite := &model.TestSuite{ + PackageName: rg.ApiGroup.ResourceGroupGoPackageShort, + } + for suffix, tmpl := range map[string]*template.Template{ + "_suite_test.go": templates.SimpleTestSuiteTemplate, + } { + name := testSuite.PackageName + suffix + content, err := generateTestSuiteFile(testSuite, tmpl) + if err != nil { + return nil, errors.Wrapf(err, "internal error: processing template '%v' for resource group %v failed", tmpl.ParseName, name) + } + v = append(v, code_generator.File{ + Filename: name, + Content: content, + }) + } + return v, nil } diff --git a/pkg/code-generator/model/project.go b/pkg/code-generator/model/project.go index de9ae2941..8b2b85b13 100644 --- a/pkg/code-generator/model/project.go +++ b/pkg/code-generator/model/project.go @@ -85,6 +85,7 @@ func (p VersionConfig) IsOurProto(protoFile string) bool { type ResourceConfig struct { ResourceName string `json:"name"` ResourcePackage string `json:"package"` // resource package doubles as the proto package or the go import package + ResourceVersion string `json:"version"` // version of the resource, used to distinguish 
when multiple versions of a resource exist } // Create a Solo-Kit backed resource from diff --git a/pkg/code-generator/parser/parser_resource.go b/pkg/code-generator/parser/parser_resource.go index 5b041f2a6..c03be36e0 100644 --- a/pkg/code-generator/parser/parser_resource.go +++ b/pkg/code-generator/parser/parser_resource.go @@ -111,10 +111,9 @@ func GetResourceGroups(apiGroup *model.ApiGroup, resources []*model.Resource) ([ } var importPrefix string - if !apiGroup.IsOurProto(resource.Filename) && !resource.IsCustom { - importPrefix = resource.ProtoPackage - } else if resource.IsCustom && resource.CustomResource.Imported { - // If is custom resource from a different version use import prefix + if !resource.IsCustom { + importPrefix = resource.ProtoPackage + "_" + resource.ParentVersion.VersionConfig.Version + } else { importPrefix = resource.CustomImportPrefix } diff --git a/pkg/multicluster/group/group_suite_test.go b/pkg/multicluster/group/group_suite_test.go new file mode 100644 index 000000000..b3611efdb --- /dev/null +++ b/pkg/multicluster/group/group_suite_test.go @@ -0,0 +1,15 @@ +// Code generated by solo-kit. DO NOT EDIT. + +package group_test + +import ( + "testing" + + . "github.com/onsi/ginkgo" + . "github.com/onsi/gomega" +) + +func TestGroup(t *testing.T) { + RegisterFailHandler(Fail) + RunSpecs(t, "Group Suite") +} diff --git a/pkg/multicluster/group/kubeconfigs_event_loop_test.go b/pkg/multicluster/group/kubeconfigs_event_loop_test.go index 1cc4f430b..1531804d6 100644 --- a/pkg/multicluster/group/kubeconfigs_event_loop_test.go +++ b/pkg/multicluster/group/kubeconfigs_event_loop_test.go @@ -9,6 +9,8 @@ import ( "sync" "time" + github_com_solo_io_solo_kit_api_multicluster_v1 "github.com/solo-io/solo-kit/api/multicluster/v1" + . "github.com/onsi/ginkgo" . 
"github.com/onsi/gomega" "github.com/solo-io/solo-kit/pkg/api/v1/clients" @@ -28,13 +30,13 @@ var _ = Describe("KubeconfigsEventLoop", func() { kubeConfigClientFactory := &factory.MemoryResourceClientFactory{ Cache: memory.NewInMemoryResourceCache(), } - kubeConfigClient, err := NewKubeConfigClient(kubeConfigClientFactory) + kubeConfigClient, err := github_com_solo_io_solo_kit_api_multicluster_v1.NewKubeConfigClient(kubeConfigClientFactory) Expect(err).NotTo(HaveOccurred()) emitter = NewKubeconfigsEmitter(kubeConfigClient) }) It("runs sync function on a new snapshot", func() { - _, err = emitter.KubeConfig().Write(NewKubeConfig(namespace, "jerry"), clients.WriteOpts{}) + _, err = emitter.KubeConfig().Write(github_com_solo_io_solo_kit_api_multicluster_v1.NewKubeConfig(namespace, "jerry"), clients.WriteOpts{}) Expect(err).NotTo(HaveOccurred()) sync := &mockKubeconfigsSyncer{} el := NewKubeconfigsEventLoop(emitter, sync) diff --git a/pkg/multicluster/group/kubeconfigs_snapshot.sk.go b/pkg/multicluster/group/kubeconfigs_snapshot.sk.go index 7ef8d190b..adf1b0d70 100644 --- a/pkg/multicluster/group/kubeconfigs_snapshot.sk.go +++ b/pkg/multicluster/group/kubeconfigs_snapshot.sk.go @@ -5,12 +5,14 @@ package group import ( "fmt" + github_com_solo_io_solo_kit_api_multicluster_v1 "github.com/solo-io/solo-kit/api/multicluster/v1" + "github.com/solo-io/go-utils/hashutils" "go.uber.org/zap" ) type KubeconfigsSnapshot struct { - Kubeconfigs KubeConfigList + Kubeconfigs github_com_solo_io_solo_kit_api_multicluster_v1.KubeConfigList } func (s KubeconfigsSnapshot) Clone() KubeconfigsSnapshot { diff --git a/pkg/multicluster/group/kubeconfigs_snapshot_emitter.sk.go b/pkg/multicluster/group/kubeconfigs_snapshot_emitter.sk.go index 21e331797..f392d88a4 100644 --- a/pkg/multicluster/group/kubeconfigs_snapshot_emitter.sk.go +++ b/pkg/multicluster/group/kubeconfigs_snapshot_emitter.sk.go @@ -6,6 +6,8 @@ import ( "sync" "time" + github_com_solo_io_solo_kit_api_multicluster_v1 
"github.com/solo-io/solo-kit/api/multicluster/v1" + "go.opencensus.io/stats" "go.opencensus.io/stats/view" "go.opencensus.io/tag" @@ -41,15 +43,15 @@ func init() { type KubeconfigsEmitter interface { Register() error - KubeConfig() KubeConfigClient + KubeConfig() github_com_solo_io_solo_kit_api_multicluster_v1.KubeConfigClient Snapshots(watchNamespaces []string, opts clients.WatchOpts) (<-chan *KubeconfigsSnapshot, <-chan error, error) } -func NewKubeconfigsEmitter(kubeConfigClient KubeConfigClient) KubeconfigsEmitter { +func NewKubeconfigsEmitter(kubeConfigClient github_com_solo_io_solo_kit_api_multicluster_v1.KubeConfigClient) KubeconfigsEmitter { return NewKubeconfigsEmitterWithEmit(kubeConfigClient, make(chan struct{})) } -func NewKubeconfigsEmitterWithEmit(kubeConfigClient KubeConfigClient, emit <-chan struct{}) KubeconfigsEmitter { +func NewKubeconfigsEmitterWithEmit(kubeConfigClient github_com_solo_io_solo_kit_api_multicluster_v1.KubeConfigClient, emit <-chan struct{}) KubeconfigsEmitter { return &kubeconfigsEmitter{ kubeConfig: kubeConfigClient, forceEmit: emit, @@ -58,7 +60,7 @@ func NewKubeconfigsEmitterWithEmit(kubeConfigClient KubeConfigClient, emit <-cha type kubeconfigsEmitter struct { forceEmit <-chan struct{} - kubeConfig KubeConfigClient + kubeConfig github_com_solo_io_solo_kit_api_multicluster_v1.KubeConfigClient } func (c *kubeconfigsEmitter) Register() error { @@ -68,7 +70,7 @@ func (c *kubeconfigsEmitter) Register() error { return nil } -func (c *kubeconfigsEmitter) KubeConfig() KubeConfigClient { +func (c *kubeconfigsEmitter) KubeConfig() github_com_solo_io_solo_kit_api_multicluster_v1.KubeConfigClient { return c.kubeConfig } @@ -90,7 +92,7 @@ func (c *kubeconfigsEmitter) Snapshots(watchNamespaces []string, opts clients.Wa ctx := opts.Ctx /* Create channel for KubeConfig */ type kubeConfigListWithNamespace struct { - list KubeConfigList + list github_com_solo_io_solo_kit_api_multicluster_v1.KubeConfigList namespace string } kubeConfigChan := 
make(chan kubeConfigListWithNamespace) @@ -140,7 +142,7 @@ func (c *kubeconfigsEmitter) Snapshots(watchNamespaces []string, opts clients.Wa sentSnapshot := currentSnapshot.Clone() snapshots <- &sentSnapshot } - kubeconfigsByNamespace := make(map[string]KubeConfigList) + kubeconfigsByNamespace := make(map[string]github_com_solo_io_solo_kit_api_multicluster_v1.KubeConfigList) for { record := func() { stats.Record(ctx, mKubeconfigsSnapshotIn.M(1)) } @@ -163,7 +165,7 @@ func (c *kubeconfigsEmitter) Snapshots(watchNamespaces []string, opts clients.Wa // merge lists by namespace kubeconfigsByNamespace[namespace] = kubeConfigNamespacedList.list - var kubeConfigList KubeConfigList + var kubeConfigList github_com_solo_io_solo_kit_api_multicluster_v1.KubeConfigList for _, kubeconfigs := range kubeconfigsByNamespace { kubeConfigList = append(kubeConfigList, kubeconfigs...) } diff --git a/pkg/multicluster/group/kubeconfigs_snapshot_emitter_test.go b/pkg/multicluster/group/kubeconfigs_snapshot_emitter_test.go index a28c005d4..6f0c13562 100644 --- a/pkg/multicluster/group/kubeconfigs_snapshot_emitter_test.go +++ b/pkg/multicluster/group/kubeconfigs_snapshot_emitter_test.go @@ -9,6 +9,8 @@ import ( "os" "time" + github_com_solo_io_solo_kit_api_multicluster_v1 "github.com/solo-io/solo-kit/api/multicluster/v1" + . "github.com/onsi/ginkgo" . 
"github.com/onsi/gomega" "github.com/solo-io/go-utils/kubeutils" @@ -37,7 +39,7 @@ var _ = Describe("GroupEmitter", func() { name1, name2 = "angela" + helpers.RandString(3), "bob" + helpers.RandString(3) kube kubernetes.Interface emitter KubeconfigsEmitter - kubeConfigClient KubeConfigClient + kubeConfigClient github_com_solo_io_solo_kit_api_multicluster_v1.KubeConfigClient ) BeforeEach(func() { @@ -51,7 +53,7 @@ var _ = Describe("GroupEmitter", func() { Cache: memory.NewInMemoryResourceCache(), } - kubeConfigClient, err = NewKubeConfigClient(kubeConfigClientFactory) + kubeConfigClient, err = github_com_solo_io_solo_kit_api_multicluster_v1.NewKubeConfigClient(kubeConfigClientFactory) Expect(err).NotTo(HaveOccurred()) emitter = NewKubeconfigsEmitter(kubeConfigClient) }) @@ -76,7 +78,7 @@ var _ = Describe("GroupEmitter", func() { KubeConfig */ - assertSnapshotkubeconfigs := func(expectkubeconfigs KubeConfigList, unexpectkubeconfigs KubeConfigList) { + assertSnapshotkubeconfigs := func(expectkubeconfigs github_com_solo_io_solo_kit_api_multicluster_v1.KubeConfigList, unexpectkubeconfigs github_com_solo_io_solo_kit_api_multicluster_v1.KubeConfigList) { drain: for { select { @@ -102,32 +104,32 @@ var _ = Describe("GroupEmitter", func() { } } } - kubeConfig1a, err := kubeConfigClient.Write(NewKubeConfig(namespace1, name1), clients.WriteOpts{Ctx: ctx}) + kubeConfig1a, err := kubeConfigClient.Write(github_com_solo_io_solo_kit_api_multicluster_v1.NewKubeConfig(namespace1, name1), clients.WriteOpts{Ctx: ctx}) Expect(err).NotTo(HaveOccurred()) - kubeConfig1b, err := kubeConfigClient.Write(NewKubeConfig(namespace2, name1), clients.WriteOpts{Ctx: ctx}) + kubeConfig1b, err := kubeConfigClient.Write(github_com_solo_io_solo_kit_api_multicluster_v1.NewKubeConfig(namespace2, name1), clients.WriteOpts{Ctx: ctx}) Expect(err).NotTo(HaveOccurred()) - assertSnapshotkubeconfigs(KubeConfigList{kubeConfig1a, kubeConfig1b}, nil) - kubeConfig2a, err := 
kubeConfigClient.Write(NewKubeConfig(namespace1, name2), clients.WriteOpts{Ctx: ctx}) + assertSnapshotkubeconfigs(github_com_solo_io_solo_kit_api_multicluster_v1.KubeConfigList{kubeConfig1a, kubeConfig1b}, nil) + kubeConfig2a, err := kubeConfigClient.Write(github_com_solo_io_solo_kit_api_multicluster_v1.NewKubeConfig(namespace1, name2), clients.WriteOpts{Ctx: ctx}) Expect(err).NotTo(HaveOccurred()) - kubeConfig2b, err := kubeConfigClient.Write(NewKubeConfig(namespace2, name2), clients.WriteOpts{Ctx: ctx}) + kubeConfig2b, err := kubeConfigClient.Write(github_com_solo_io_solo_kit_api_multicluster_v1.NewKubeConfig(namespace2, name2), clients.WriteOpts{Ctx: ctx}) Expect(err).NotTo(HaveOccurred()) - assertSnapshotkubeconfigs(KubeConfigList{kubeConfig1a, kubeConfig1b, kubeConfig2a, kubeConfig2b}, nil) + assertSnapshotkubeconfigs(github_com_solo_io_solo_kit_api_multicluster_v1.KubeConfigList{kubeConfig1a, kubeConfig1b, kubeConfig2a, kubeConfig2b}, nil) err = kubeConfigClient.Delete(kubeConfig2a.GetMetadata().Namespace, kubeConfig2a.GetMetadata().Name, clients.DeleteOpts{Ctx: ctx}) Expect(err).NotTo(HaveOccurred()) err = kubeConfigClient.Delete(kubeConfig2b.GetMetadata().Namespace, kubeConfig2b.GetMetadata().Name, clients.DeleteOpts{Ctx: ctx}) Expect(err).NotTo(HaveOccurred()) - assertSnapshotkubeconfigs(KubeConfigList{kubeConfig1a, kubeConfig1b}, KubeConfigList{kubeConfig2a, kubeConfig2b}) + assertSnapshotkubeconfigs(github_com_solo_io_solo_kit_api_multicluster_v1.KubeConfigList{kubeConfig1a, kubeConfig1b}, github_com_solo_io_solo_kit_api_multicluster_v1.KubeConfigList{kubeConfig2a, kubeConfig2b}) err = kubeConfigClient.Delete(kubeConfig1a.GetMetadata().Namespace, kubeConfig1a.GetMetadata().Name, clients.DeleteOpts{Ctx: ctx}) Expect(err).NotTo(HaveOccurred()) err = kubeConfigClient.Delete(kubeConfig1b.GetMetadata().Namespace, kubeConfig1b.GetMetadata().Name, clients.DeleteOpts{Ctx: ctx}) Expect(err).NotTo(HaveOccurred()) - assertSnapshotkubeconfigs(nil, 
KubeConfigList{kubeConfig1a, kubeConfig1b, kubeConfig2a, kubeConfig2b}) + assertSnapshotkubeconfigs(nil, github_com_solo_io_solo_kit_api_multicluster_v1.KubeConfigList{kubeConfig1a, kubeConfig1b, kubeConfig2a, kubeConfig2b}) }) It("tracks snapshots on changes to any resource using AllNamespace", func() { ctx := context.Background() @@ -146,7 +148,7 @@ var _ = Describe("GroupEmitter", func() { KubeConfig */ - assertSnapshotkubeconfigs := func(expectkubeconfigs KubeConfigList, unexpectkubeconfigs KubeConfigList) { + assertSnapshotkubeconfigs := func(expectkubeconfigs github_com_solo_io_solo_kit_api_multicluster_v1.KubeConfigList, unexpectkubeconfigs github_com_solo_io_solo_kit_api_multicluster_v1.KubeConfigList) { drain: for { select { @@ -172,31 +174,31 @@ var _ = Describe("GroupEmitter", func() { } } } - kubeConfig1a, err := kubeConfigClient.Write(NewKubeConfig(namespace1, name1), clients.WriteOpts{Ctx: ctx}) + kubeConfig1a, err := kubeConfigClient.Write(github_com_solo_io_solo_kit_api_multicluster_v1.NewKubeConfig(namespace1, name1), clients.WriteOpts{Ctx: ctx}) Expect(err).NotTo(HaveOccurred()) - kubeConfig1b, err := kubeConfigClient.Write(NewKubeConfig(namespace2, name1), clients.WriteOpts{Ctx: ctx}) + kubeConfig1b, err := kubeConfigClient.Write(github_com_solo_io_solo_kit_api_multicluster_v1.NewKubeConfig(namespace2, name1), clients.WriteOpts{Ctx: ctx}) Expect(err).NotTo(HaveOccurred()) - assertSnapshotkubeconfigs(KubeConfigList{kubeConfig1a, kubeConfig1b}, nil) - kubeConfig2a, err := kubeConfigClient.Write(NewKubeConfig(namespace1, name2), clients.WriteOpts{Ctx: ctx}) + assertSnapshotkubeconfigs(github_com_solo_io_solo_kit_api_multicluster_v1.KubeConfigList{kubeConfig1a, kubeConfig1b}, nil) + kubeConfig2a, err := kubeConfigClient.Write(github_com_solo_io_solo_kit_api_multicluster_v1.NewKubeConfig(namespace1, name2), clients.WriteOpts{Ctx: ctx}) Expect(err).NotTo(HaveOccurred()) - kubeConfig2b, err := kubeConfigClient.Write(NewKubeConfig(namespace2, name2), 
clients.WriteOpts{Ctx: ctx}) + kubeConfig2b, err := kubeConfigClient.Write(github_com_solo_io_solo_kit_api_multicluster_v1.NewKubeConfig(namespace2, name2), clients.WriteOpts{Ctx: ctx}) Expect(err).NotTo(HaveOccurred()) - assertSnapshotkubeconfigs(KubeConfigList{kubeConfig1a, kubeConfig1b, kubeConfig2a, kubeConfig2b}, nil) + assertSnapshotkubeconfigs(github_com_solo_io_solo_kit_api_multicluster_v1.KubeConfigList{kubeConfig1a, kubeConfig1b, kubeConfig2a, kubeConfig2b}, nil) err = kubeConfigClient.Delete(kubeConfig2a.GetMetadata().Namespace, kubeConfig2a.GetMetadata().Name, clients.DeleteOpts{Ctx: ctx}) Expect(err).NotTo(HaveOccurred()) err = kubeConfigClient.Delete(kubeConfig2b.GetMetadata().Namespace, kubeConfig2b.GetMetadata().Name, clients.DeleteOpts{Ctx: ctx}) Expect(err).NotTo(HaveOccurred()) - assertSnapshotkubeconfigs(KubeConfigList{kubeConfig1a, kubeConfig1b}, KubeConfigList{kubeConfig2a, kubeConfig2b}) + assertSnapshotkubeconfigs(github_com_solo_io_solo_kit_api_multicluster_v1.KubeConfigList{kubeConfig1a, kubeConfig1b}, github_com_solo_io_solo_kit_api_multicluster_v1.KubeConfigList{kubeConfig2a, kubeConfig2b}) err = kubeConfigClient.Delete(kubeConfig1a.GetMetadata().Namespace, kubeConfig1a.GetMetadata().Name, clients.DeleteOpts{Ctx: ctx}) Expect(err).NotTo(HaveOccurred()) err = kubeConfigClient.Delete(kubeConfig1b.GetMetadata().Namespace, kubeConfig1b.GetMetadata().Name, clients.DeleteOpts{Ctx: ctx}) Expect(err).NotTo(HaveOccurred()) - assertSnapshotkubeconfigs(nil, KubeConfigList{kubeConfig1a, kubeConfig1b, kubeConfig2a, kubeConfig2b}) + assertSnapshotkubeconfigs(nil, github_com_solo_io_solo_kit_api_multicluster_v1.KubeConfigList{kubeConfig1a, kubeConfig1b, kubeConfig2a, kubeConfig2b}) }) }) diff --git a/pkg/multicluster/group/kubeconfigs_snapshot_simple_emitter.sk.go b/pkg/multicluster/group/kubeconfigs_snapshot_simple_emitter.sk.go index 2905a88b6..a23ec9680 100644 --- a/pkg/multicluster/group/kubeconfigs_snapshot_simple_emitter.sk.go +++ 
b/pkg/multicluster/group/kubeconfigs_snapshot_simple_emitter.sk.go @@ -7,6 +7,8 @@ import ( "fmt" "time" + github_com_solo_io_solo_kit_api_multicluster_v1 "github.com/solo-io/solo-kit/api/multicluster/v1" + "go.opencensus.io/stats" "github.com/solo-io/go-utils/errutils" @@ -81,7 +83,7 @@ func (c *kubeconfigsSimpleEmitter) Snapshots(ctx context.Context) (<-chan *Kubec currentSnapshot = KubeconfigsSnapshot{} for _, res := range untypedList { switch typed := res.(type) { - case *KubeConfig: + case *github_com_solo_io_solo_kit_api_multicluster_v1.KubeConfig: currentSnapshot.Kubeconfigs = append(currentSnapshot.Kubeconfigs, typed) default: select { diff --git a/test/mocks/api/solo-kit.json b/test/mocks/api/solo-kit.json index 941f81c61..1b49e96df 100644 --- a/test/mocks/api/solo-kit.json +++ b/test/mocks/api/solo-kit.json @@ -7,29 +7,34 @@ "conversion_go_package": "github.com/solo-io/solo-kit/test/mocks/conversion", "resource_group_go_package": "github.com/solo-io/solo-kit/test/mocks/group", "imports": [ - "github.com/solo-io/solo-kit/api/external/kubernetes" + "github.com/solo-io/solo-kit/api/external/kubernetes", + "github.com/solo-io/solo-kit/test/mocks/api" ], "resource_groups": { "testing.solo.io": [ { "name": "MockResource", - "package": "testing.solo.io" + "package": "testing.solo.io", + "version": "v1" }, { "name": "FakeResource", - "package": "testing.solo.io" + "package": "testing.solo.io", + "version": "v1alpha1" }, { "name": "AnotherMockResource", - "package": "testing.solo.io" + "package": "testing.solo.io", + "version": "v1" }, { "name": "ClusterResource", - "package": "testing.solo.io" + "package": "testing.solo.io", + "version": "v1" }, { "name": "MockCustomType", - "package": "github.com/solo-io/solo-kit/test/mocks/api/v1/customtype" + "package": "github.com/solo-io/solo-kit/test/mocks/v1" }, { "name": "Pod", diff --git a/test/mocks/group/group_suite_test.go b/test/mocks/group/group_suite_test.go new file mode 100644 index 000000000..b3611efdb --- 
/dev/null +++ b/test/mocks/group/group_suite_test.go @@ -0,0 +1,15 @@ +// Code generated by solo-kit. DO NOT EDIT. + +package group_test + +import ( + "testing" + + . "github.com/onsi/ginkgo" + . "github.com/onsi/gomega" +) + +func TestGroup(t *testing.T) { + RegisterFailHandler(Fail) + RunSpecs(t, "Group Suite") +} diff --git a/test/mocks/group/testing_event_loop_test.go b/test/mocks/group/testing_event_loop_test.go index 23934c80a..a6e98ce3c 100644 --- a/test/mocks/group/testing_event_loop_test.go +++ b/test/mocks/group/testing_event_loop_test.go @@ -10,6 +10,8 @@ import ( "time" github_com_solo_io_solo_kit_pkg_api_v1_resources_common_kubernetes "github.com/solo-io/solo-kit/pkg/api/v1/resources/common/kubernetes" + github_com_solo_io_solo_kit_test_mocks_v1 "github.com/solo-io/solo-kit/test/mocks/v1" + testing_solo_io_kubernetes "github.com/solo-io/solo-kit/test/mocks/v1" . "github.com/onsi/ginkgo" . "github.com/onsi/gomega" @@ -30,31 +32,31 @@ var _ = Describe("TestingEventLoop", func() { mockResourceClientFactory := &factory.MemoryResourceClientFactory{ Cache: memory.NewInMemoryResourceCache(), } - mockResourceClient, err := NewMockResourceClient(mockResourceClientFactory) + mockResourceClient, err := testing_solo_io_kubernetes.NewMockResourceClient(mockResourceClientFactory) Expect(err).NotTo(HaveOccurred()) fakeResourceClientFactory := &factory.MemoryResourceClientFactory{ Cache: memory.NewInMemoryResourceCache(), } - fakeResourceClient, err := NewFakeResourceClient(fakeResourceClientFactory) + fakeResourceClient, err := testing_solo_io_kubernetes.NewFakeResourceClient(fakeResourceClientFactory) Expect(err).NotTo(HaveOccurred()) anotherMockResourceClientFactory := &factory.MemoryResourceClientFactory{ Cache: memory.NewInMemoryResourceCache(), } - anotherMockResourceClient, err := NewAnotherMockResourceClient(anotherMockResourceClientFactory) + anotherMockResourceClient, err := 
testing_solo_io_kubernetes.NewAnotherMockResourceClient(anotherMockResourceClientFactory) Expect(err).NotTo(HaveOccurred()) clusterResourceClientFactory := &factory.MemoryResourceClientFactory{ Cache: memory.NewInMemoryResourceCache(), } - clusterResourceClient, err := NewClusterResourceClient(clusterResourceClientFactory) + clusterResourceClient, err := testing_solo_io_kubernetes.NewClusterResourceClient(clusterResourceClientFactory) Expect(err).NotTo(HaveOccurred()) mockCustomTypeClientFactory := &factory.MemoryResourceClientFactory{ Cache: memory.NewInMemoryResourceCache(), } - mockCustomTypeClient, err := NewMockCustomTypeClient(mockCustomTypeClientFactory) + mockCustomTypeClient, err := github_com_solo_io_solo_kit_test_mocks_v1.NewMockCustomTypeClient(mockCustomTypeClientFactory) Expect(err).NotTo(HaveOccurred()) podClientFactory := &factory.MemoryResourceClientFactory{ @@ -66,15 +68,15 @@ var _ = Describe("TestingEventLoop", func() { emitter = NewTestingEmitter(mockResourceClient, fakeResourceClient, anotherMockResourceClient, clusterResourceClient, mockCustomTypeClient, podClient) }) It("runs sync function on a new snapshot", func() { - _, err = emitter.MockResource().Write(NewMockResource(namespace, "jerry"), clients.WriteOpts{}) + _, err = emitter.MockResource().Write(testing_solo_io_kubernetes.NewMockResource(namespace, "jerry"), clients.WriteOpts{}) Expect(err).NotTo(HaveOccurred()) - _, err = emitter.FakeResource().Write(NewFakeResource(namespace, "jerry"), clients.WriteOpts{}) + _, err = emitter.FakeResource().Write(testing_solo_io_kubernetes.NewFakeResource(namespace, "jerry"), clients.WriteOpts{}) Expect(err).NotTo(HaveOccurred()) - _, err = emitter.AnotherMockResource().Write(NewAnotherMockResource(namespace, "jerry"), clients.WriteOpts{}) + _, err = emitter.AnotherMockResource().Write(testing_solo_io_kubernetes.NewAnotherMockResource(namespace, "jerry"), clients.WriteOpts{}) Expect(err).NotTo(HaveOccurred()) - _, err = 
emitter.ClusterResource().Write(NewClusterResource(namespace, "jerry"), clients.WriteOpts{}) + _, err = emitter.ClusterResource().Write(testing_solo_io_kubernetes.NewClusterResource(namespace, "jerry"), clients.WriteOpts{}) Expect(err).NotTo(HaveOccurred()) - _, err = emitter.MockCustomType().Write(NewMockCustomType(namespace, "jerry"), clients.WriteOpts{}) + _, err = emitter.MockCustomType().Write(github_com_solo_io_solo_kit_test_mocks_v1.NewMockCustomType(namespace, "jerry"), clients.WriteOpts{}) Expect(err).NotTo(HaveOccurred()) _, err = emitter.Pod().Write(github_com_solo_io_solo_kit_pkg_api_v1_resources_common_kubernetes.NewPod(namespace, "jerry"), clients.WriteOpts{}) Expect(err).NotTo(HaveOccurred()) diff --git a/test/mocks/group/testing_snapshot.sk.go b/test/mocks/group/testing_snapshot.sk.go index 6046e1b36..cb18c62ae 100644 --- a/test/mocks/group/testing_snapshot.sk.go +++ b/test/mocks/group/testing_snapshot.sk.go @@ -6,17 +6,19 @@ import ( "fmt" github_com_solo_io_solo_kit_pkg_api_v1_resources_common_kubernetes "github.com/solo-io/solo-kit/pkg/api/v1/resources/common/kubernetes" + github_com_solo_io_solo_kit_test_mocks_v1 "github.com/solo-io/solo-kit/test/mocks/v1" + testing_solo_io_kubernetes "github.com/solo-io/solo-kit/test/mocks/v1" "github.com/solo-io/go-utils/hashutils" "go.uber.org/zap" ) type TestingSnapshot struct { - Mocks MockResourceList - Fakes FakeResourceList - Anothermockresources AnotherMockResourceList - Clusterresources ClusterResourceList - Mcts MockCustomTypeList + Mocks testing_solo_io_kubernetes.MockResourceList + Fakes testing_solo_io_kubernetes.FakeResourceList + Anothermockresources testing_solo_io_kubernetes.AnotherMockResourceList + Clusterresources testing_solo_io_kubernetes.ClusterResourceList + Mcts github_com_solo_io_solo_kit_test_mocks_v1.MockCustomTypeList Pods github_com_solo_io_solo_kit_pkg_api_v1_resources_common_kubernetes.PodList } diff --git a/test/mocks/group/testing_snapshot_emitter.sk.go 
b/test/mocks/group/testing_snapshot_emitter.sk.go index c6961bd84..81cf511c5 100644 --- a/test/mocks/group/testing_snapshot_emitter.sk.go +++ b/test/mocks/group/testing_snapshot_emitter.sk.go @@ -7,6 +7,8 @@ import ( "time" github_com_solo_io_solo_kit_pkg_api_v1_resources_common_kubernetes "github.com/solo-io/solo-kit/pkg/api/v1/resources/common/kubernetes" + github_com_solo_io_solo_kit_test_mocks_v1 "github.com/solo-io/solo-kit/test/mocks/v1" + testing_solo_io_kubernetes "github.com/solo-io/solo-kit/test/mocks/v1" "go.opencensus.io/stats" "go.opencensus.io/stats/view" @@ -43,20 +45,20 @@ func init() { type TestingEmitter interface { Register() error - MockResource() MockResourceClient - FakeResource() FakeResourceClient - AnotherMockResource() AnotherMockResourceClient - ClusterResource() ClusterResourceClient - MockCustomType() MockCustomTypeClient + MockResource() testing_solo_io_kubernetes.MockResourceClient + FakeResource() testing_solo_io_kubernetes.FakeResourceClient + AnotherMockResource() testing_solo_io_kubernetes.AnotherMockResourceClient + ClusterResource() testing_solo_io_kubernetes.ClusterResourceClient + MockCustomType() github_com_solo_io_solo_kit_test_mocks_v1.MockCustomTypeClient Pod() github_com_solo_io_solo_kit_pkg_api_v1_resources_common_kubernetes.PodClient Snapshots(watchNamespaces []string, opts clients.WatchOpts) (<-chan *TestingSnapshot, <-chan error, error) } -func NewTestingEmitter(mockResourceClient MockResourceClient, fakeResourceClient FakeResourceClient, anotherMockResourceClient AnotherMockResourceClient, clusterResourceClient ClusterResourceClient, mockCustomTypeClient MockCustomTypeClient, podClient github_com_solo_io_solo_kit_pkg_api_v1_resources_common_kubernetes.PodClient) TestingEmitter { +func NewTestingEmitter(mockResourceClient testing_solo_io_kubernetes.MockResourceClient, fakeResourceClient testing_solo_io_kubernetes.FakeResourceClient, anotherMockResourceClient testing_solo_io_kubernetes.AnotherMockResourceClient, 
clusterResourceClient testing_solo_io_kubernetes.ClusterResourceClient, mockCustomTypeClient github_com_solo_io_solo_kit_test_mocks_v1.MockCustomTypeClient, podClient github_com_solo_io_solo_kit_pkg_api_v1_resources_common_kubernetes.PodClient) TestingEmitter { return NewTestingEmitterWithEmit(mockResourceClient, fakeResourceClient, anotherMockResourceClient, clusterResourceClient, mockCustomTypeClient, podClient, make(chan struct{})) } -func NewTestingEmitterWithEmit(mockResourceClient MockResourceClient, fakeResourceClient FakeResourceClient, anotherMockResourceClient AnotherMockResourceClient, clusterResourceClient ClusterResourceClient, mockCustomTypeClient MockCustomTypeClient, podClient github_com_solo_io_solo_kit_pkg_api_v1_resources_common_kubernetes.PodClient, emit <-chan struct{}) TestingEmitter { +func NewTestingEmitterWithEmit(mockResourceClient testing_solo_io_kubernetes.MockResourceClient, fakeResourceClient testing_solo_io_kubernetes.FakeResourceClient, anotherMockResourceClient testing_solo_io_kubernetes.AnotherMockResourceClient, clusterResourceClient testing_solo_io_kubernetes.ClusterResourceClient, mockCustomTypeClient github_com_solo_io_solo_kit_test_mocks_v1.MockCustomTypeClient, podClient github_com_solo_io_solo_kit_pkg_api_v1_resources_common_kubernetes.PodClient, emit <-chan struct{}) TestingEmitter { return &testingEmitter{ mockResource: mockResourceClient, fakeResource: fakeResourceClient, @@ -70,11 +72,11 @@ func NewTestingEmitterWithEmit(mockResourceClient MockResourceClient, fakeResour type testingEmitter struct { forceEmit <-chan struct{} - mockResource MockResourceClient - fakeResource FakeResourceClient - anotherMockResource AnotherMockResourceClient - clusterResource ClusterResourceClient - mockCustomType MockCustomTypeClient + mockResource testing_solo_io_kubernetes.MockResourceClient + fakeResource testing_solo_io_kubernetes.FakeResourceClient + anotherMockResource testing_solo_io_kubernetes.AnotherMockResourceClient + 
clusterResource testing_solo_io_kubernetes.ClusterResourceClient + mockCustomType github_com_solo_io_solo_kit_test_mocks_v1.MockCustomTypeClient pod github_com_solo_io_solo_kit_pkg_api_v1_resources_common_kubernetes.PodClient } @@ -100,23 +102,23 @@ func (c *testingEmitter) Register() error { return nil } -func (c *testingEmitter) MockResource() MockResourceClient { +func (c *testingEmitter) MockResource() testing_solo_io_kubernetes.MockResourceClient { return c.mockResource } -func (c *testingEmitter) FakeResource() FakeResourceClient { +func (c *testingEmitter) FakeResource() testing_solo_io_kubernetes.FakeResourceClient { return c.fakeResource } -func (c *testingEmitter) AnotherMockResource() AnotherMockResourceClient { +func (c *testingEmitter) AnotherMockResource() testing_solo_io_kubernetes.AnotherMockResourceClient { return c.anotherMockResource } -func (c *testingEmitter) ClusterResource() ClusterResourceClient { +func (c *testingEmitter) ClusterResource() testing_solo_io_kubernetes.ClusterResourceClient { return c.clusterResource } -func (c *testingEmitter) MockCustomType() MockCustomTypeClient { +func (c *testingEmitter) MockCustomType() github_com_solo_io_solo_kit_test_mocks_v1.MockCustomTypeClient { return c.mockCustomType } @@ -142,26 +144,26 @@ func (c *testingEmitter) Snapshots(watchNamespaces []string, opts clients.WatchO ctx := opts.Ctx /* Create channel for MockResource */ type mockResourceListWithNamespace struct { - list MockResourceList + list testing_solo_io_kubernetes.MockResourceList namespace string } mockResourceChan := make(chan mockResourceListWithNamespace) /* Create channel for FakeResource */ type fakeResourceListWithNamespace struct { - list FakeResourceList + list testing_solo_io_kubernetes.FakeResourceList namespace string } fakeResourceChan := make(chan fakeResourceListWithNamespace) /* Create channel for AnotherMockResource */ type anotherMockResourceListWithNamespace struct { - list AnotherMockResourceList + list 
testing_solo_io_kubernetes.AnotherMockResourceList namespace string } anotherMockResourceChan := make(chan anotherMockResourceListWithNamespace) /* Create channel for ClusterResource */ /* Create channel for MockCustomType */ type mockCustomTypeListWithNamespace struct { - list MockCustomTypeList + list github_com_solo_io_solo_kit_test_mocks_v1.MockCustomTypeList namespace string } mockCustomTypeChan := make(chan mockCustomTypeListWithNamespace) @@ -296,10 +298,10 @@ func (c *testingEmitter) Snapshots(watchNamespaces []string, opts clients.WatchO sentSnapshot := currentSnapshot.Clone() snapshots <- &sentSnapshot } - mocksByNamespace := make(map[string]MockResourceList) - fakesByNamespace := make(map[string]FakeResourceList) - anothermockresourcesByNamespace := make(map[string]AnotherMockResourceList) - mctsByNamespace := make(map[string]MockCustomTypeList) + mocksByNamespace := make(map[string]testing_solo_io_kubernetes.MockResourceList) + fakesByNamespace := make(map[string]testing_solo_io_kubernetes.FakeResourceList) + anothermockresourcesByNamespace := make(map[string]testing_solo_io_kubernetes.AnotherMockResourceList) + mctsByNamespace := make(map[string]github_com_solo_io_solo_kit_test_mocks_v1.MockCustomTypeList) podsByNamespace := make(map[string]github_com_solo_io_solo_kit_pkg_api_v1_resources_common_kubernetes.PodList) for { @@ -323,7 +325,7 @@ func (c *testingEmitter) Snapshots(watchNamespaces []string, opts clients.WatchO // merge lists by namespace mocksByNamespace[namespace] = mockResourceNamespacedList.list - var mockResourceList MockResourceList + var mockResourceList testing_solo_io_kubernetes.MockResourceList for _, mocks := range mocksByNamespace { mockResourceList = append(mockResourceList, mocks...) 
} @@ -335,7 +337,7 @@ func (c *testingEmitter) Snapshots(watchNamespaces []string, opts clients.WatchO // merge lists by namespace fakesByNamespace[namespace] = fakeResourceNamespacedList.list - var fakeResourceList FakeResourceList + var fakeResourceList testing_solo_io_kubernetes.FakeResourceList for _, fakes := range fakesByNamespace { fakeResourceList = append(fakeResourceList, fakes...) } @@ -347,7 +349,7 @@ func (c *testingEmitter) Snapshots(watchNamespaces []string, opts clients.WatchO // merge lists by namespace anothermockresourcesByNamespace[namespace] = anotherMockResourceNamespacedList.list - var anotherMockResourceList AnotherMockResourceList + var anotherMockResourceList testing_solo_io_kubernetes.AnotherMockResourceList for _, anothermockresources := range anothermockresourcesByNamespace { anotherMockResourceList = append(anotherMockResourceList, anothermockresources...) } @@ -362,7 +364,7 @@ func (c *testingEmitter) Snapshots(watchNamespaces []string, opts clients.WatchO // merge lists by namespace mctsByNamespace[namespace] = mockCustomTypeNamespacedList.list - var mockCustomTypeList MockCustomTypeList + var mockCustomTypeList github_com_solo_io_solo_kit_test_mocks_v1.MockCustomTypeList for _, mcts := range mctsByNamespace { mockCustomTypeList = append(mockCustomTypeList, mcts...) } diff --git a/test/mocks/group/testing_snapshot_emitter_test.go b/test/mocks/group/testing_snapshot_emitter_test.go index 420274d10..671be4850 100644 --- a/test/mocks/group/testing_snapshot_emitter_test.go +++ b/test/mocks/group/testing_snapshot_emitter_test.go @@ -10,6 +10,8 @@ import ( "time" github_com_solo_io_solo_kit_pkg_api_v1_resources_common_kubernetes "github.com/solo-io/solo-kit/pkg/api/v1/resources/common/kubernetes" + github_com_solo_io_solo_kit_test_mocks_v1 "github.com/solo-io/solo-kit/test/mocks/v1" + testing_solo_io_kubernetes "github.com/solo-io/solo-kit/test/mocks/v1" . "github.com/onsi/ginkgo" . 
"github.com/onsi/gomega" @@ -42,11 +44,11 @@ var _ = Describe("GroupEmitter", func() { cfg *rest.Config kube kubernetes.Interface emitter TestingEmitter - mockResourceClient MockResourceClient - fakeResourceClient FakeResourceClient - anotherMockResourceClient AnotherMockResourceClient - clusterResourceClient ClusterResourceClient - mockCustomTypeClient MockCustomTypeClient + mockResourceClient testing_solo_io_kubernetes.MockResourceClient + fakeResourceClient testing_solo_io_kubernetes.FakeResourceClient + anotherMockResourceClient testing_solo_io_kubernetes.AnotherMockResourceClient + clusterResourceClient testing_solo_io_kubernetes.ClusterResourceClient + mockCustomTypeClient github_com_solo_io_solo_kit_test_mocks_v1.MockCustomTypeClient podClient github_com_solo_io_solo_kit_pkg_api_v1_resources_common_kubernetes.PodClient ) @@ -60,46 +62,46 @@ var _ = Describe("GroupEmitter", func() { Expect(err).NotTo(HaveOccurred()) // MockResource Constructor mockResourceClientFactory := &factory.KubeResourceClientFactory{ - Crd: MockResourceCrd, + Crd: testing_solo_io_kubernetes.MockResourceCrd, Cfg: cfg, SharedCache: kuberc.NewKubeCache(context.TODO()), } - mockResourceClient, err = NewMockResourceClient(mockResourceClientFactory) + mockResourceClient, err = testing_solo_io_kubernetes.NewMockResourceClient(mockResourceClientFactory) Expect(err).NotTo(HaveOccurred()) // FakeResource Constructor fakeResourceClientFactory := &factory.KubeResourceClientFactory{ - Crd: FakeResourceCrd, + Crd: testing_solo_io_kubernetes.FakeResourceCrd, Cfg: cfg, SharedCache: kuberc.NewKubeCache(context.TODO()), } - fakeResourceClient, err = NewFakeResourceClient(fakeResourceClientFactory) + fakeResourceClient, err = testing_solo_io_kubernetes.NewFakeResourceClient(fakeResourceClientFactory) Expect(err).NotTo(HaveOccurred()) // AnotherMockResource Constructor anotherMockResourceClientFactory := &factory.KubeResourceClientFactory{ - Crd: AnotherMockResourceCrd, + Crd: 
testing_solo_io_kubernetes.AnotherMockResourceCrd, Cfg: cfg, SharedCache: kuberc.NewKubeCache(context.TODO()), } - anotherMockResourceClient, err = NewAnotherMockResourceClient(anotherMockResourceClientFactory) + anotherMockResourceClient, err = testing_solo_io_kubernetes.NewAnotherMockResourceClient(anotherMockResourceClientFactory) Expect(err).NotTo(HaveOccurred()) // ClusterResource Constructor clusterResourceClientFactory := &factory.KubeResourceClientFactory{ - Crd: ClusterResourceCrd, + Crd: testing_solo_io_kubernetes.ClusterResourceCrd, Cfg: cfg, SharedCache: kuberc.NewKubeCache(context.TODO()), } - clusterResourceClient, err = NewClusterResourceClient(clusterResourceClientFactory) + clusterResourceClient, err = testing_solo_io_kubernetes.NewClusterResourceClient(clusterResourceClientFactory) Expect(err).NotTo(HaveOccurred()) // MockCustomType Constructor mockCustomTypeClientFactory := &factory.MemoryResourceClientFactory{ Cache: memory.NewInMemoryResourceCache(), } - mockCustomTypeClient, err = NewMockCustomTypeClient(mockCustomTypeClientFactory) + mockCustomTypeClient, err = github_com_solo_io_solo_kit_test_mocks_v1.NewMockCustomTypeClient(mockCustomTypeClientFactory) Expect(err).NotTo(HaveOccurred()) // Pod Constructor podClientFactory := &factory.MemoryResourceClientFactory{ @@ -133,7 +135,7 @@ var _ = Describe("GroupEmitter", func() { MockResource */ - assertSnapshotMocks := func(expectMocks MockResourceList, unexpectMocks MockResourceList) { + assertSnapshotMocks := func(expectMocks testing_solo_io_kubernetes.MockResourceList, unexpectMocks testing_solo_io_kubernetes.MockResourceList) { drain: for { select { @@ -159,38 +161,38 @@ var _ = Describe("GroupEmitter", func() { } } } - mockResource1a, err := mockResourceClient.Write(NewMockResource(namespace1, name1), clients.WriteOpts{Ctx: ctx}) + mockResource1a, err := mockResourceClient.Write(testing_solo_io_kubernetes.NewMockResource(namespace1, name1), clients.WriteOpts{Ctx: ctx}) 
Expect(err).NotTo(HaveOccurred()) - mockResource1b, err := mockResourceClient.Write(NewMockResource(namespace2, name1), clients.WriteOpts{Ctx: ctx}) + mockResource1b, err := mockResourceClient.Write(testing_solo_io_kubernetes.NewMockResource(namespace2, name1), clients.WriteOpts{Ctx: ctx}) Expect(err).NotTo(HaveOccurred()) - assertSnapshotMocks(MockResourceList{mockResource1a, mockResource1b}, nil) - mockResource2a, err := mockResourceClient.Write(NewMockResource(namespace1, name2), clients.WriteOpts{Ctx: ctx}) + assertSnapshotMocks(testing_solo_io_kubernetes.MockResourceList{mockResource1a, mockResource1b}, nil) + mockResource2a, err := mockResourceClient.Write(testing_solo_io_kubernetes.NewMockResource(namespace1, name2), clients.WriteOpts{Ctx: ctx}) Expect(err).NotTo(HaveOccurred()) - mockResource2b, err := mockResourceClient.Write(NewMockResource(namespace2, name2), clients.WriteOpts{Ctx: ctx}) + mockResource2b, err := mockResourceClient.Write(testing_solo_io_kubernetes.NewMockResource(namespace2, name2), clients.WriteOpts{Ctx: ctx}) Expect(err).NotTo(HaveOccurred()) - assertSnapshotMocks(MockResourceList{mockResource1a, mockResource1b, mockResource2a, mockResource2b}, nil) + assertSnapshotMocks(testing_solo_io_kubernetes.MockResourceList{mockResource1a, mockResource1b, mockResource2a, mockResource2b}, nil) err = mockResourceClient.Delete(mockResource2a.GetMetadata().Namespace, mockResource2a.GetMetadata().Name, clients.DeleteOpts{Ctx: ctx}) Expect(err).NotTo(HaveOccurred()) err = mockResourceClient.Delete(mockResource2b.GetMetadata().Namespace, mockResource2b.GetMetadata().Name, clients.DeleteOpts{Ctx: ctx}) Expect(err).NotTo(HaveOccurred()) - assertSnapshotMocks(MockResourceList{mockResource1a, mockResource1b}, MockResourceList{mockResource2a, mockResource2b}) + assertSnapshotMocks(testing_solo_io_kubernetes.MockResourceList{mockResource1a, mockResource1b}, testing_solo_io_kubernetes.MockResourceList{mockResource2a, mockResource2b}) err = 
mockResourceClient.Delete(mockResource1a.GetMetadata().Namespace, mockResource1a.GetMetadata().Name, clients.DeleteOpts{Ctx: ctx}) Expect(err).NotTo(HaveOccurred()) err = mockResourceClient.Delete(mockResource1b.GetMetadata().Namespace, mockResource1b.GetMetadata().Name, clients.DeleteOpts{Ctx: ctx}) Expect(err).NotTo(HaveOccurred()) - assertSnapshotMocks(nil, MockResourceList{mockResource1a, mockResource1b, mockResource2a, mockResource2b}) + assertSnapshotMocks(nil, testing_solo_io_kubernetes.MockResourceList{mockResource1a, mockResource1b, mockResource2a, mockResource2b}) /* FakeResource */ - assertSnapshotFakes := func(expectFakes FakeResourceList, unexpectFakes FakeResourceList) { + assertSnapshotFakes := func(expectFakes testing_solo_io_kubernetes.FakeResourceList, unexpectFakes testing_solo_io_kubernetes.FakeResourceList) { drain: for { select { @@ -216,38 +218,38 @@ var _ = Describe("GroupEmitter", func() { } } } - fakeResource1a, err := fakeResourceClient.Write(NewFakeResource(namespace1, name1), clients.WriteOpts{Ctx: ctx}) + fakeResource1a, err := fakeResourceClient.Write(testing_solo_io_kubernetes.NewFakeResource(namespace1, name1), clients.WriteOpts{Ctx: ctx}) Expect(err).NotTo(HaveOccurred()) - fakeResource1b, err := fakeResourceClient.Write(NewFakeResource(namespace2, name1), clients.WriteOpts{Ctx: ctx}) + fakeResource1b, err := fakeResourceClient.Write(testing_solo_io_kubernetes.NewFakeResource(namespace2, name1), clients.WriteOpts{Ctx: ctx}) Expect(err).NotTo(HaveOccurred()) - assertSnapshotFakes(FakeResourceList{fakeResource1a, fakeResource1b}, nil) - fakeResource2a, err := fakeResourceClient.Write(NewFakeResource(namespace1, name2), clients.WriteOpts{Ctx: ctx}) + assertSnapshotFakes(testing_solo_io_kubernetes.FakeResourceList{fakeResource1a, fakeResource1b}, nil) + fakeResource2a, err := fakeResourceClient.Write(testing_solo_io_kubernetes.NewFakeResource(namespace1, name2), clients.WriteOpts{Ctx: ctx}) Expect(err).NotTo(HaveOccurred()) - 
fakeResource2b, err := fakeResourceClient.Write(NewFakeResource(namespace2, name2), clients.WriteOpts{Ctx: ctx}) + fakeResource2b, err := fakeResourceClient.Write(testing_solo_io_kubernetes.NewFakeResource(namespace2, name2), clients.WriteOpts{Ctx: ctx}) Expect(err).NotTo(HaveOccurred()) - assertSnapshotFakes(FakeResourceList{fakeResource1a, fakeResource1b, fakeResource2a, fakeResource2b}, nil) + assertSnapshotFakes(testing_solo_io_kubernetes.FakeResourceList{fakeResource1a, fakeResource1b, fakeResource2a, fakeResource2b}, nil) err = fakeResourceClient.Delete(fakeResource2a.GetMetadata().Namespace, fakeResource2a.GetMetadata().Name, clients.DeleteOpts{Ctx: ctx}) Expect(err).NotTo(HaveOccurred()) err = fakeResourceClient.Delete(fakeResource2b.GetMetadata().Namespace, fakeResource2b.GetMetadata().Name, clients.DeleteOpts{Ctx: ctx}) Expect(err).NotTo(HaveOccurred()) - assertSnapshotFakes(FakeResourceList{fakeResource1a, fakeResource1b}, FakeResourceList{fakeResource2a, fakeResource2b}) + assertSnapshotFakes(testing_solo_io_kubernetes.FakeResourceList{fakeResource1a, fakeResource1b}, testing_solo_io_kubernetes.FakeResourceList{fakeResource2a, fakeResource2b}) err = fakeResourceClient.Delete(fakeResource1a.GetMetadata().Namespace, fakeResource1a.GetMetadata().Name, clients.DeleteOpts{Ctx: ctx}) Expect(err).NotTo(HaveOccurred()) err = fakeResourceClient.Delete(fakeResource1b.GetMetadata().Namespace, fakeResource1b.GetMetadata().Name, clients.DeleteOpts{Ctx: ctx}) Expect(err).NotTo(HaveOccurred()) - assertSnapshotFakes(nil, FakeResourceList{fakeResource1a, fakeResource1b, fakeResource2a, fakeResource2b}) + assertSnapshotFakes(nil, testing_solo_io_kubernetes.FakeResourceList{fakeResource1a, fakeResource1b, fakeResource2a, fakeResource2b}) /* AnotherMockResource */ - assertSnapshotAnothermockresources := func(expectAnothermockresources AnotherMockResourceList, unexpectAnothermockresources AnotherMockResourceList) { + assertSnapshotAnothermockresources := 
func(expectAnothermockresources testing_solo_io_kubernetes.AnotherMockResourceList, unexpectAnothermockresources testing_solo_io_kubernetes.AnotherMockResourceList) { drain: for { select { @@ -273,38 +275,38 @@ var _ = Describe("GroupEmitter", func() { } } } - anotherMockResource1a, err := anotherMockResourceClient.Write(NewAnotherMockResource(namespace1, name1), clients.WriteOpts{Ctx: ctx}) + anotherMockResource1a, err := anotherMockResourceClient.Write(testing_solo_io_kubernetes.NewAnotherMockResource(namespace1, name1), clients.WriteOpts{Ctx: ctx}) Expect(err).NotTo(HaveOccurred()) - anotherMockResource1b, err := anotherMockResourceClient.Write(NewAnotherMockResource(namespace2, name1), clients.WriteOpts{Ctx: ctx}) + anotherMockResource1b, err := anotherMockResourceClient.Write(testing_solo_io_kubernetes.NewAnotherMockResource(namespace2, name1), clients.WriteOpts{Ctx: ctx}) Expect(err).NotTo(HaveOccurred()) - assertSnapshotAnothermockresources(AnotherMockResourceList{anotherMockResource1a, anotherMockResource1b}, nil) - anotherMockResource2a, err := anotherMockResourceClient.Write(NewAnotherMockResource(namespace1, name2), clients.WriteOpts{Ctx: ctx}) + assertSnapshotAnothermockresources(testing_solo_io_kubernetes.AnotherMockResourceList{anotherMockResource1a, anotherMockResource1b}, nil) + anotherMockResource2a, err := anotherMockResourceClient.Write(testing_solo_io_kubernetes.NewAnotherMockResource(namespace1, name2), clients.WriteOpts{Ctx: ctx}) Expect(err).NotTo(HaveOccurred()) - anotherMockResource2b, err := anotherMockResourceClient.Write(NewAnotherMockResource(namespace2, name2), clients.WriteOpts{Ctx: ctx}) + anotherMockResource2b, err := anotherMockResourceClient.Write(testing_solo_io_kubernetes.NewAnotherMockResource(namespace2, name2), clients.WriteOpts{Ctx: ctx}) Expect(err).NotTo(HaveOccurred()) - assertSnapshotAnothermockresources(AnotherMockResourceList{anotherMockResource1a, anotherMockResource1b, anotherMockResource2a, anotherMockResource2b}, 
nil) + assertSnapshotAnothermockresources(testing_solo_io_kubernetes.AnotherMockResourceList{anotherMockResource1a, anotherMockResource1b, anotherMockResource2a, anotherMockResource2b}, nil) err = anotherMockResourceClient.Delete(anotherMockResource2a.GetMetadata().Namespace, anotherMockResource2a.GetMetadata().Name, clients.DeleteOpts{Ctx: ctx}) Expect(err).NotTo(HaveOccurred()) err = anotherMockResourceClient.Delete(anotherMockResource2b.GetMetadata().Namespace, anotherMockResource2b.GetMetadata().Name, clients.DeleteOpts{Ctx: ctx}) Expect(err).NotTo(HaveOccurred()) - assertSnapshotAnothermockresources(AnotherMockResourceList{anotherMockResource1a, anotherMockResource1b}, AnotherMockResourceList{anotherMockResource2a, anotherMockResource2b}) + assertSnapshotAnothermockresources(testing_solo_io_kubernetes.AnotherMockResourceList{anotherMockResource1a, anotherMockResource1b}, testing_solo_io_kubernetes.AnotherMockResourceList{anotherMockResource2a, anotherMockResource2b}) err = anotherMockResourceClient.Delete(anotherMockResource1a.GetMetadata().Namespace, anotherMockResource1a.GetMetadata().Name, clients.DeleteOpts{Ctx: ctx}) Expect(err).NotTo(HaveOccurred()) err = anotherMockResourceClient.Delete(anotherMockResource1b.GetMetadata().Namespace, anotherMockResource1b.GetMetadata().Name, clients.DeleteOpts{Ctx: ctx}) Expect(err).NotTo(HaveOccurred()) - assertSnapshotAnothermockresources(nil, AnotherMockResourceList{anotherMockResource1a, anotherMockResource1b, anotherMockResource2a, anotherMockResource2b}) + assertSnapshotAnothermockresources(nil, testing_solo_io_kubernetes.AnotherMockResourceList{anotherMockResource1a, anotherMockResource1b, anotherMockResource2a, anotherMockResource2b}) /* ClusterResource */ - assertSnapshotClusterresources := func(expectClusterresources ClusterResourceList, unexpectClusterresources ClusterResourceList) { + assertSnapshotClusterresources := func(expectClusterresources testing_solo_io_kubernetes.ClusterResourceList, 
unexpectClusterresources testing_solo_io_kubernetes.ClusterResourceList) { drain: for { select { @@ -328,30 +330,30 @@ var _ = Describe("GroupEmitter", func() { } } } - clusterResource1a, err := clusterResourceClient.Write(NewClusterResource(namespace1, name1), clients.WriteOpts{Ctx: ctx}) + clusterResource1a, err := clusterResourceClient.Write(testing_solo_io_kubernetes.NewClusterResource(namespace1, name1), clients.WriteOpts{Ctx: ctx}) Expect(err).NotTo(HaveOccurred()) - assertSnapshotClusterresources(ClusterResourceList{clusterResource1a}, nil) - clusterResource2a, err := clusterResourceClient.Write(NewClusterResource(namespace1, name2), clients.WriteOpts{Ctx: ctx}) + assertSnapshotClusterresources(testing_solo_io_kubernetes.ClusterResourceList{clusterResource1a}, nil) + clusterResource2a, err := clusterResourceClient.Write(testing_solo_io_kubernetes.NewClusterResource(namespace1, name2), clients.WriteOpts{Ctx: ctx}) Expect(err).NotTo(HaveOccurred()) - assertSnapshotClusterresources(ClusterResourceList{clusterResource1a, clusterResource2a}, nil) + assertSnapshotClusterresources(testing_solo_io_kubernetes.ClusterResourceList{clusterResource1a, clusterResource2a}, nil) err = clusterResourceClient.Delete(clusterResource2a.GetMetadata().Name, clients.DeleteOpts{Ctx: ctx}) Expect(err).NotTo(HaveOccurred()) - assertSnapshotClusterresources(ClusterResourceList{clusterResource1a}, ClusterResourceList{clusterResource2a}) + assertSnapshotClusterresources(testing_solo_io_kubernetes.ClusterResourceList{clusterResource1a}, testing_solo_io_kubernetes.ClusterResourceList{clusterResource2a}) err = clusterResourceClient.Delete(clusterResource1a.GetMetadata().Name, clients.DeleteOpts{Ctx: ctx}) Expect(err).NotTo(HaveOccurred()) - assertSnapshotClusterresources(nil, ClusterResourceList{clusterResource1a, clusterResource2a}) + assertSnapshotClusterresources(nil, testing_solo_io_kubernetes.ClusterResourceList{clusterResource1a, clusterResource2a}) /* MockCustomType */ - 
assertSnapshotmcts := func(expectmcts MockCustomTypeList, unexpectmcts MockCustomTypeList) { + assertSnapshotmcts := func(expectmcts github_com_solo_io_solo_kit_test_mocks_v1.MockCustomTypeList, unexpectmcts github_com_solo_io_solo_kit_test_mocks_v1.MockCustomTypeList) { drain: for { select { @@ -377,32 +379,32 @@ var _ = Describe("GroupEmitter", func() { } } } - mockCustomType1a, err := mockCustomTypeClient.Write(NewMockCustomType(namespace1, name1), clients.WriteOpts{Ctx: ctx}) + mockCustomType1a, err := mockCustomTypeClient.Write(github_com_solo_io_solo_kit_test_mocks_v1.NewMockCustomType(namespace1, name1), clients.WriteOpts{Ctx: ctx}) Expect(err).NotTo(HaveOccurred()) - mockCustomType1b, err := mockCustomTypeClient.Write(NewMockCustomType(namespace2, name1), clients.WriteOpts{Ctx: ctx}) + mockCustomType1b, err := mockCustomTypeClient.Write(github_com_solo_io_solo_kit_test_mocks_v1.NewMockCustomType(namespace2, name1), clients.WriteOpts{Ctx: ctx}) Expect(err).NotTo(HaveOccurred()) - assertSnapshotmcts(MockCustomTypeList{mockCustomType1a, mockCustomType1b}, nil) - mockCustomType2a, err := mockCustomTypeClient.Write(NewMockCustomType(namespace1, name2), clients.WriteOpts{Ctx: ctx}) + assertSnapshotmcts(github_com_solo_io_solo_kit_test_mocks_v1.MockCustomTypeList{mockCustomType1a, mockCustomType1b}, nil) + mockCustomType2a, err := mockCustomTypeClient.Write(github_com_solo_io_solo_kit_test_mocks_v1.NewMockCustomType(namespace1, name2), clients.WriteOpts{Ctx: ctx}) Expect(err).NotTo(HaveOccurred()) - mockCustomType2b, err := mockCustomTypeClient.Write(NewMockCustomType(namespace2, name2), clients.WriteOpts{Ctx: ctx}) + mockCustomType2b, err := mockCustomTypeClient.Write(github_com_solo_io_solo_kit_test_mocks_v1.NewMockCustomType(namespace2, name2), clients.WriteOpts{Ctx: ctx}) Expect(err).NotTo(HaveOccurred()) - assertSnapshotmcts(MockCustomTypeList{mockCustomType1a, mockCustomType1b, mockCustomType2a, mockCustomType2b}, nil) + 
assertSnapshotmcts(github_com_solo_io_solo_kit_test_mocks_v1.MockCustomTypeList{mockCustomType1a, mockCustomType1b, mockCustomType2a, mockCustomType2b}, nil) err = mockCustomTypeClient.Delete(mockCustomType2a.GetMetadata().Namespace, mockCustomType2a.GetMetadata().Name, clients.DeleteOpts{Ctx: ctx}) Expect(err).NotTo(HaveOccurred()) err = mockCustomTypeClient.Delete(mockCustomType2b.GetMetadata().Namespace, mockCustomType2b.GetMetadata().Name, clients.DeleteOpts{Ctx: ctx}) Expect(err).NotTo(HaveOccurred()) - assertSnapshotmcts(MockCustomTypeList{mockCustomType1a, mockCustomType1b}, MockCustomTypeList{mockCustomType2a, mockCustomType2b}) + assertSnapshotmcts(github_com_solo_io_solo_kit_test_mocks_v1.MockCustomTypeList{mockCustomType1a, mockCustomType1b}, github_com_solo_io_solo_kit_test_mocks_v1.MockCustomTypeList{mockCustomType2a, mockCustomType2b}) err = mockCustomTypeClient.Delete(mockCustomType1a.GetMetadata().Namespace, mockCustomType1a.GetMetadata().Name, clients.DeleteOpts{Ctx: ctx}) Expect(err).NotTo(HaveOccurred()) err = mockCustomTypeClient.Delete(mockCustomType1b.GetMetadata().Namespace, mockCustomType1b.GetMetadata().Name, clients.DeleteOpts{Ctx: ctx}) Expect(err).NotTo(HaveOccurred()) - assertSnapshotmcts(nil, MockCustomTypeList{mockCustomType1a, mockCustomType1b, mockCustomType2a, mockCustomType2b}) + assertSnapshotmcts(nil, github_com_solo_io_solo_kit_test_mocks_v1.MockCustomTypeList{mockCustomType1a, mockCustomType1b, mockCustomType2a, mockCustomType2b}) /* Pod @@ -478,7 +480,7 @@ var _ = Describe("GroupEmitter", func() { MockResource */ - assertSnapshotMocks := func(expectMocks MockResourceList, unexpectMocks MockResourceList) { + assertSnapshotMocks := func(expectMocks testing_solo_io_kubernetes.MockResourceList, unexpectMocks testing_solo_io_kubernetes.MockResourceList) { drain: for { select { @@ -504,38 +506,38 @@ var _ = Describe("GroupEmitter", func() { } } } - mockResource1a, err := mockResourceClient.Write(NewMockResource(namespace1, name1), 
clients.WriteOpts{Ctx: ctx}) + mockResource1a, err := mockResourceClient.Write(testing_solo_io_kubernetes.NewMockResource(namespace1, name1), clients.WriteOpts{Ctx: ctx}) Expect(err).NotTo(HaveOccurred()) - mockResource1b, err := mockResourceClient.Write(NewMockResource(namespace2, name1), clients.WriteOpts{Ctx: ctx}) + mockResource1b, err := mockResourceClient.Write(testing_solo_io_kubernetes.NewMockResource(namespace2, name1), clients.WriteOpts{Ctx: ctx}) Expect(err).NotTo(HaveOccurred()) - assertSnapshotMocks(MockResourceList{mockResource1a, mockResource1b}, nil) - mockResource2a, err := mockResourceClient.Write(NewMockResource(namespace1, name2), clients.WriteOpts{Ctx: ctx}) + assertSnapshotMocks(testing_solo_io_kubernetes.MockResourceList{mockResource1a, mockResource1b}, nil) + mockResource2a, err := mockResourceClient.Write(testing_solo_io_kubernetes.NewMockResource(namespace1, name2), clients.WriteOpts{Ctx: ctx}) Expect(err).NotTo(HaveOccurred()) - mockResource2b, err := mockResourceClient.Write(NewMockResource(namespace2, name2), clients.WriteOpts{Ctx: ctx}) + mockResource2b, err := mockResourceClient.Write(testing_solo_io_kubernetes.NewMockResource(namespace2, name2), clients.WriteOpts{Ctx: ctx}) Expect(err).NotTo(HaveOccurred()) - assertSnapshotMocks(MockResourceList{mockResource1a, mockResource1b, mockResource2a, mockResource2b}, nil) + assertSnapshotMocks(testing_solo_io_kubernetes.MockResourceList{mockResource1a, mockResource1b, mockResource2a, mockResource2b}, nil) err = mockResourceClient.Delete(mockResource2a.GetMetadata().Namespace, mockResource2a.GetMetadata().Name, clients.DeleteOpts{Ctx: ctx}) Expect(err).NotTo(HaveOccurred()) err = mockResourceClient.Delete(mockResource2b.GetMetadata().Namespace, mockResource2b.GetMetadata().Name, clients.DeleteOpts{Ctx: ctx}) Expect(err).NotTo(HaveOccurred()) - assertSnapshotMocks(MockResourceList{mockResource1a, mockResource1b}, MockResourceList{mockResource2a, mockResource2b}) + 
assertSnapshotMocks(testing_solo_io_kubernetes.MockResourceList{mockResource1a, mockResource1b}, testing_solo_io_kubernetes.MockResourceList{mockResource2a, mockResource2b}) err = mockResourceClient.Delete(mockResource1a.GetMetadata().Namespace, mockResource1a.GetMetadata().Name, clients.DeleteOpts{Ctx: ctx}) Expect(err).NotTo(HaveOccurred()) err = mockResourceClient.Delete(mockResource1b.GetMetadata().Namespace, mockResource1b.GetMetadata().Name, clients.DeleteOpts{Ctx: ctx}) Expect(err).NotTo(HaveOccurred()) - assertSnapshotMocks(nil, MockResourceList{mockResource1a, mockResource1b, mockResource2a, mockResource2b}) + assertSnapshotMocks(nil, testing_solo_io_kubernetes.MockResourceList{mockResource1a, mockResource1b, mockResource2a, mockResource2b}) /* FakeResource */ - assertSnapshotFakes := func(expectFakes FakeResourceList, unexpectFakes FakeResourceList) { + assertSnapshotFakes := func(expectFakes testing_solo_io_kubernetes.FakeResourceList, unexpectFakes testing_solo_io_kubernetes.FakeResourceList) { drain: for { select { @@ -561,38 +563,38 @@ var _ = Describe("GroupEmitter", func() { } } } - fakeResource1a, err := fakeResourceClient.Write(NewFakeResource(namespace1, name1), clients.WriteOpts{Ctx: ctx}) + fakeResource1a, err := fakeResourceClient.Write(testing_solo_io_kubernetes.NewFakeResource(namespace1, name1), clients.WriteOpts{Ctx: ctx}) Expect(err).NotTo(HaveOccurred()) - fakeResource1b, err := fakeResourceClient.Write(NewFakeResource(namespace2, name1), clients.WriteOpts{Ctx: ctx}) + fakeResource1b, err := fakeResourceClient.Write(testing_solo_io_kubernetes.NewFakeResource(namespace2, name1), clients.WriteOpts{Ctx: ctx}) Expect(err).NotTo(HaveOccurred()) - assertSnapshotFakes(FakeResourceList{fakeResource1a, fakeResource1b}, nil) - fakeResource2a, err := fakeResourceClient.Write(NewFakeResource(namespace1, name2), clients.WriteOpts{Ctx: ctx}) + assertSnapshotFakes(testing_solo_io_kubernetes.FakeResourceList{fakeResource1a, fakeResource1b}, nil) + 
fakeResource2a, err := fakeResourceClient.Write(testing_solo_io_kubernetes.NewFakeResource(namespace1, name2), clients.WriteOpts{Ctx: ctx}) Expect(err).NotTo(HaveOccurred()) - fakeResource2b, err := fakeResourceClient.Write(NewFakeResource(namespace2, name2), clients.WriteOpts{Ctx: ctx}) + fakeResource2b, err := fakeResourceClient.Write(testing_solo_io_kubernetes.NewFakeResource(namespace2, name2), clients.WriteOpts{Ctx: ctx}) Expect(err).NotTo(HaveOccurred()) - assertSnapshotFakes(FakeResourceList{fakeResource1a, fakeResource1b, fakeResource2a, fakeResource2b}, nil) + assertSnapshotFakes(testing_solo_io_kubernetes.FakeResourceList{fakeResource1a, fakeResource1b, fakeResource2a, fakeResource2b}, nil) err = fakeResourceClient.Delete(fakeResource2a.GetMetadata().Namespace, fakeResource2a.GetMetadata().Name, clients.DeleteOpts{Ctx: ctx}) Expect(err).NotTo(HaveOccurred()) err = fakeResourceClient.Delete(fakeResource2b.GetMetadata().Namespace, fakeResource2b.GetMetadata().Name, clients.DeleteOpts{Ctx: ctx}) Expect(err).NotTo(HaveOccurred()) - assertSnapshotFakes(FakeResourceList{fakeResource1a, fakeResource1b}, FakeResourceList{fakeResource2a, fakeResource2b}) + assertSnapshotFakes(testing_solo_io_kubernetes.FakeResourceList{fakeResource1a, fakeResource1b}, testing_solo_io_kubernetes.FakeResourceList{fakeResource2a, fakeResource2b}) err = fakeResourceClient.Delete(fakeResource1a.GetMetadata().Namespace, fakeResource1a.GetMetadata().Name, clients.DeleteOpts{Ctx: ctx}) Expect(err).NotTo(HaveOccurred()) err = fakeResourceClient.Delete(fakeResource1b.GetMetadata().Namespace, fakeResource1b.GetMetadata().Name, clients.DeleteOpts{Ctx: ctx}) Expect(err).NotTo(HaveOccurred()) - assertSnapshotFakes(nil, FakeResourceList{fakeResource1a, fakeResource1b, fakeResource2a, fakeResource2b}) + assertSnapshotFakes(nil, testing_solo_io_kubernetes.FakeResourceList{fakeResource1a, fakeResource1b, fakeResource2a, fakeResource2b}) /* AnotherMockResource */ - assertSnapshotAnothermockresources 
:= func(expectAnothermockresources AnotherMockResourceList, unexpectAnothermockresources AnotherMockResourceList) { + assertSnapshotAnothermockresources := func(expectAnothermockresources testing_solo_io_kubernetes.AnotherMockResourceList, unexpectAnothermockresources testing_solo_io_kubernetes.AnotherMockResourceList) { drain: for { select { @@ -618,38 +620,38 @@ var _ = Describe("GroupEmitter", func() { } } } - anotherMockResource1a, err := anotherMockResourceClient.Write(NewAnotherMockResource(namespace1, name1), clients.WriteOpts{Ctx: ctx}) + anotherMockResource1a, err := anotherMockResourceClient.Write(testing_solo_io_kubernetes.NewAnotherMockResource(namespace1, name1), clients.WriteOpts{Ctx: ctx}) Expect(err).NotTo(HaveOccurred()) - anotherMockResource1b, err := anotherMockResourceClient.Write(NewAnotherMockResource(namespace2, name1), clients.WriteOpts{Ctx: ctx}) + anotherMockResource1b, err := anotherMockResourceClient.Write(testing_solo_io_kubernetes.NewAnotherMockResource(namespace2, name1), clients.WriteOpts{Ctx: ctx}) Expect(err).NotTo(HaveOccurred()) - assertSnapshotAnothermockresources(AnotherMockResourceList{anotherMockResource1a, anotherMockResource1b}, nil) - anotherMockResource2a, err := anotherMockResourceClient.Write(NewAnotherMockResource(namespace1, name2), clients.WriteOpts{Ctx: ctx}) + assertSnapshotAnothermockresources(testing_solo_io_kubernetes.AnotherMockResourceList{anotherMockResource1a, anotherMockResource1b}, nil) + anotherMockResource2a, err := anotherMockResourceClient.Write(testing_solo_io_kubernetes.NewAnotherMockResource(namespace1, name2), clients.WriteOpts{Ctx: ctx}) Expect(err).NotTo(HaveOccurred()) - anotherMockResource2b, err := anotherMockResourceClient.Write(NewAnotherMockResource(namespace2, name2), clients.WriteOpts{Ctx: ctx}) + anotherMockResource2b, err := anotherMockResourceClient.Write(testing_solo_io_kubernetes.NewAnotherMockResource(namespace2, name2), clients.WriteOpts{Ctx: ctx}) Expect(err).NotTo(HaveOccurred()) 
- assertSnapshotAnothermockresources(AnotherMockResourceList{anotherMockResource1a, anotherMockResource1b, anotherMockResource2a, anotherMockResource2b}, nil) + assertSnapshotAnothermockresources(testing_solo_io_kubernetes.AnotherMockResourceList{anotherMockResource1a, anotherMockResource1b, anotherMockResource2a, anotherMockResource2b}, nil) err = anotherMockResourceClient.Delete(anotherMockResource2a.GetMetadata().Namespace, anotherMockResource2a.GetMetadata().Name, clients.DeleteOpts{Ctx: ctx}) Expect(err).NotTo(HaveOccurred()) err = anotherMockResourceClient.Delete(anotherMockResource2b.GetMetadata().Namespace, anotherMockResource2b.GetMetadata().Name, clients.DeleteOpts{Ctx: ctx}) Expect(err).NotTo(HaveOccurred()) - assertSnapshotAnothermockresources(AnotherMockResourceList{anotherMockResource1a, anotherMockResource1b}, AnotherMockResourceList{anotherMockResource2a, anotherMockResource2b}) + assertSnapshotAnothermockresources(testing_solo_io_kubernetes.AnotherMockResourceList{anotherMockResource1a, anotherMockResource1b}, testing_solo_io_kubernetes.AnotherMockResourceList{anotherMockResource2a, anotherMockResource2b}) err = anotherMockResourceClient.Delete(anotherMockResource1a.GetMetadata().Namespace, anotherMockResource1a.GetMetadata().Name, clients.DeleteOpts{Ctx: ctx}) Expect(err).NotTo(HaveOccurred()) err = anotherMockResourceClient.Delete(anotherMockResource1b.GetMetadata().Namespace, anotherMockResource1b.GetMetadata().Name, clients.DeleteOpts{Ctx: ctx}) Expect(err).NotTo(HaveOccurred()) - assertSnapshotAnothermockresources(nil, AnotherMockResourceList{anotherMockResource1a, anotherMockResource1b, anotherMockResource2a, anotherMockResource2b}) + assertSnapshotAnothermockresources(nil, testing_solo_io_kubernetes.AnotherMockResourceList{anotherMockResource1a, anotherMockResource1b, anotherMockResource2a, anotherMockResource2b}) /* ClusterResource */ - assertSnapshotClusterresources := func(expectClusterresources ClusterResourceList, 
unexpectClusterresources ClusterResourceList) { + assertSnapshotClusterresources := func(expectClusterresources testing_solo_io_kubernetes.ClusterResourceList, unexpectClusterresources testing_solo_io_kubernetes.ClusterResourceList) { drain: for { select { @@ -673,30 +675,30 @@ var _ = Describe("GroupEmitter", func() { } } } - clusterResource1a, err := clusterResourceClient.Write(NewClusterResource(namespace1, name1), clients.WriteOpts{Ctx: ctx}) + clusterResource1a, err := clusterResourceClient.Write(testing_solo_io_kubernetes.NewClusterResource(namespace1, name1), clients.WriteOpts{Ctx: ctx}) Expect(err).NotTo(HaveOccurred()) - assertSnapshotClusterresources(ClusterResourceList{clusterResource1a}, nil) - clusterResource2a, err := clusterResourceClient.Write(NewClusterResource(namespace1, name2), clients.WriteOpts{Ctx: ctx}) + assertSnapshotClusterresources(testing_solo_io_kubernetes.ClusterResourceList{clusterResource1a}, nil) + clusterResource2a, err := clusterResourceClient.Write(testing_solo_io_kubernetes.NewClusterResource(namespace1, name2), clients.WriteOpts{Ctx: ctx}) Expect(err).NotTo(HaveOccurred()) - assertSnapshotClusterresources(ClusterResourceList{clusterResource1a, clusterResource2a}, nil) + assertSnapshotClusterresources(testing_solo_io_kubernetes.ClusterResourceList{clusterResource1a, clusterResource2a}, nil) err = clusterResourceClient.Delete(clusterResource2a.GetMetadata().Name, clients.DeleteOpts{Ctx: ctx}) Expect(err).NotTo(HaveOccurred()) - assertSnapshotClusterresources(ClusterResourceList{clusterResource1a}, ClusterResourceList{clusterResource2a}) + assertSnapshotClusterresources(testing_solo_io_kubernetes.ClusterResourceList{clusterResource1a}, testing_solo_io_kubernetes.ClusterResourceList{clusterResource2a}) err = clusterResourceClient.Delete(clusterResource1a.GetMetadata().Name, clients.DeleteOpts{Ctx: ctx}) Expect(err).NotTo(HaveOccurred()) - assertSnapshotClusterresources(nil, ClusterResourceList{clusterResource1a, clusterResource2a}) 
+ assertSnapshotClusterresources(nil, testing_solo_io_kubernetes.ClusterResourceList{clusterResource1a, clusterResource2a}) /* MockCustomType */ - assertSnapshotmcts := func(expectmcts MockCustomTypeList, unexpectmcts MockCustomTypeList) { + assertSnapshotmcts := func(expectmcts github_com_solo_io_solo_kit_test_mocks_v1.MockCustomTypeList, unexpectmcts github_com_solo_io_solo_kit_test_mocks_v1.MockCustomTypeList) { drain: for { select { @@ -722,32 +724,32 @@ var _ = Describe("GroupEmitter", func() { } } } - mockCustomType1a, err := mockCustomTypeClient.Write(NewMockCustomType(namespace1, name1), clients.WriteOpts{Ctx: ctx}) + mockCustomType1a, err := mockCustomTypeClient.Write(github_com_solo_io_solo_kit_test_mocks_v1.NewMockCustomType(namespace1, name1), clients.WriteOpts{Ctx: ctx}) Expect(err).NotTo(HaveOccurred()) - mockCustomType1b, err := mockCustomTypeClient.Write(NewMockCustomType(namespace2, name1), clients.WriteOpts{Ctx: ctx}) + mockCustomType1b, err := mockCustomTypeClient.Write(github_com_solo_io_solo_kit_test_mocks_v1.NewMockCustomType(namespace2, name1), clients.WriteOpts{Ctx: ctx}) Expect(err).NotTo(HaveOccurred()) - assertSnapshotmcts(MockCustomTypeList{mockCustomType1a, mockCustomType1b}, nil) - mockCustomType2a, err := mockCustomTypeClient.Write(NewMockCustomType(namespace1, name2), clients.WriteOpts{Ctx: ctx}) + assertSnapshotmcts(github_com_solo_io_solo_kit_test_mocks_v1.MockCustomTypeList{mockCustomType1a, mockCustomType1b}, nil) + mockCustomType2a, err := mockCustomTypeClient.Write(github_com_solo_io_solo_kit_test_mocks_v1.NewMockCustomType(namespace1, name2), clients.WriteOpts{Ctx: ctx}) Expect(err).NotTo(HaveOccurred()) - mockCustomType2b, err := mockCustomTypeClient.Write(NewMockCustomType(namespace2, name2), clients.WriteOpts{Ctx: ctx}) + mockCustomType2b, err := mockCustomTypeClient.Write(github_com_solo_io_solo_kit_test_mocks_v1.NewMockCustomType(namespace2, name2), clients.WriteOpts{Ctx: ctx}) Expect(err).NotTo(HaveOccurred()) - 
assertSnapshotmcts(MockCustomTypeList{mockCustomType1a, mockCustomType1b, mockCustomType2a, mockCustomType2b}, nil) + assertSnapshotmcts(github_com_solo_io_solo_kit_test_mocks_v1.MockCustomTypeList{mockCustomType1a, mockCustomType1b, mockCustomType2a, mockCustomType2b}, nil) err = mockCustomTypeClient.Delete(mockCustomType2a.GetMetadata().Namespace, mockCustomType2a.GetMetadata().Name, clients.DeleteOpts{Ctx: ctx}) Expect(err).NotTo(HaveOccurred()) err = mockCustomTypeClient.Delete(mockCustomType2b.GetMetadata().Namespace, mockCustomType2b.GetMetadata().Name, clients.DeleteOpts{Ctx: ctx}) Expect(err).NotTo(HaveOccurred()) - assertSnapshotmcts(MockCustomTypeList{mockCustomType1a, mockCustomType1b}, MockCustomTypeList{mockCustomType2a, mockCustomType2b}) + assertSnapshotmcts(github_com_solo_io_solo_kit_test_mocks_v1.MockCustomTypeList{mockCustomType1a, mockCustomType1b}, github_com_solo_io_solo_kit_test_mocks_v1.MockCustomTypeList{mockCustomType2a, mockCustomType2b}) err = mockCustomTypeClient.Delete(mockCustomType1a.GetMetadata().Namespace, mockCustomType1a.GetMetadata().Name, clients.DeleteOpts{Ctx: ctx}) Expect(err).NotTo(HaveOccurred()) err = mockCustomTypeClient.Delete(mockCustomType1b.GetMetadata().Namespace, mockCustomType1b.GetMetadata().Name, clients.DeleteOpts{Ctx: ctx}) Expect(err).NotTo(HaveOccurred()) - assertSnapshotmcts(nil, MockCustomTypeList{mockCustomType1a, mockCustomType1b, mockCustomType2a, mockCustomType2b}) + assertSnapshotmcts(nil, github_com_solo_io_solo_kit_test_mocks_v1.MockCustomTypeList{mockCustomType1a, mockCustomType1b, mockCustomType2a, mockCustomType2b}) /* Pod diff --git a/test/mocks/group/testing_snapshot_simple_emitter.sk.go b/test/mocks/group/testing_snapshot_simple_emitter.sk.go index a236afcc1..802d27bc2 100644 --- a/test/mocks/group/testing_snapshot_simple_emitter.sk.go +++ b/test/mocks/group/testing_snapshot_simple_emitter.sk.go @@ -8,6 +8,8 @@ import ( "time" github_com_solo_io_solo_kit_pkg_api_v1_resources_common_kubernetes 
"github.com/solo-io/solo-kit/pkg/api/v1/resources/common/kubernetes" + github_com_solo_io_solo_kit_test_mocks_v1 "github.com/solo-io/solo-kit/test/mocks/v1" + testing_solo_io_kubernetes "github.com/solo-io/solo-kit/test/mocks/v1" "go.opencensus.io/stats" @@ -83,15 +85,15 @@ func (c *testingSimpleEmitter) Snapshots(ctx context.Context) (<-chan *TestingSn currentSnapshot = TestingSnapshot{} for _, res := range untypedList { switch typed := res.(type) { - case *MockResource: + case *testing_solo_io_kubernetes.MockResource: currentSnapshot.Mocks = append(currentSnapshot.Mocks, typed) - case *FakeResource: + case *testing_solo_io_kubernetes.FakeResource: currentSnapshot.Fakes = append(currentSnapshot.Fakes, typed) - case *AnotherMockResource: + case *testing_solo_io_kubernetes.AnotherMockResource: currentSnapshot.Anothermockresources = append(currentSnapshot.Anothermockresources, typed) - case *ClusterResource: + case *testing_solo_io_kubernetes.ClusterResource: currentSnapshot.Clusterresources = append(currentSnapshot.Clusterresources, typed) - case *MockCustomType: + case *github_com_solo_io_solo_kit_test_mocks_v1.MockCustomType: currentSnapshot.Mcts = append(currentSnapshot.Mcts, typed) case *github_com_solo_io_solo_kit_pkg_api_v1_resources_common_kubernetes.Pod: currentSnapshot.Pods = append(currentSnapshot.Pods, typed) diff --git a/test/mockstwo/api/solo-kit.json b/test/mockstwo/api/solo-kit.json index 40354f64d..b65bd1275 100644 --- a/test/mockstwo/api/solo-kit.json +++ b/test/mockstwo/api/solo-kit.json @@ -12,19 +12,23 @@ "testing.solo.io": [ { "name": "MockResource", - "package": "testing.solo.io" + "package": "testing.solo.io", + "version": "v1" }, { "name": "FakeResource", - "package": "testing.solo.io" + "package": "testing.solo.io", + "version": "v1" }, { "name": "AnotherMockResource", - "package": "testing.solo.io" + "package": "testing.solo.io", + "version": "v1" }, { "name": "ClusterResource", - "package": "testing.solo.io" + "package": "testing.solo.io", 
+ "version": "v1" }, { "name": "MockCustomType", diff --git a/test/mockstwo/group/group_suite_test.go b/test/mockstwo/group/group_suite_test.go new file mode 100644 index 000000000..b3611efdb --- /dev/null +++ b/test/mockstwo/group/group_suite_test.go @@ -0,0 +1,15 @@ +// Code generated by solo-kit. DO NOT EDIT. + +package group_test + +import ( + "testing" + + . "github.com/onsi/ginkgo" + . "github.com/onsi/gomega" +) + +func TestGroup(t *testing.T) { + RegisterFailHandler(Fail) + RunSpecs(t, "Group Suite") +} diff --git a/test/mockstwo/group/testing_event_loop_test.go b/test/mockstwo/group/testing_event_loop_test.go index 344294f2d..356e0e781 100644 --- a/test/mockstwo/group/testing_event_loop_test.go +++ b/test/mockstwo/group/testing_event_loop_test.go @@ -10,7 +10,8 @@ import ( "time" github_com_solo_io_solo_kit_pkg_api_v1_resources_common_kubernetes "github.com/solo-io/solo-kit/pkg/api/v1/resources/common/kubernetes" - testing_solo_io "github.com/solo-io/solo-kit/test/mocks/v1" + github_com_solo_io_solo_kit_test_mocks_api_v1_customtype "github.com/solo-io/solo-kit/test/mocks/api/v1/customtype" + testing_solo_io_kubernetes "github.com/solo-io/solo-kit/test/mocks/v1" . "github.com/onsi/ginkgo" . 
"github.com/onsi/gomega" @@ -31,31 +32,31 @@ var _ = Describe("TestingEventLoop", func() { mockResourceClientFactory := &factory.MemoryResourceClientFactory{ Cache: memory.NewInMemoryResourceCache(), } - mockResourceClient, err := testing_solo_io.NewMockResourceClient(mockResourceClientFactory) + mockResourceClient, err := testing_solo_io_kubernetes.NewMockResourceClient(mockResourceClientFactory) Expect(err).NotTo(HaveOccurred()) fakeResourceClientFactory := &factory.MemoryResourceClientFactory{ Cache: memory.NewInMemoryResourceCache(), } - fakeResourceClient, err := testing_solo_io.NewFakeResourceClient(fakeResourceClientFactory) + fakeResourceClient, err := testing_solo_io_kubernetes.NewFakeResourceClient(fakeResourceClientFactory) Expect(err).NotTo(HaveOccurred()) anotherMockResourceClientFactory := &factory.MemoryResourceClientFactory{ Cache: memory.NewInMemoryResourceCache(), } - anotherMockResourceClient, err := testing_solo_io.NewAnotherMockResourceClient(anotherMockResourceClientFactory) + anotherMockResourceClient, err := testing_solo_io_kubernetes.NewAnotherMockResourceClient(anotherMockResourceClientFactory) Expect(err).NotTo(HaveOccurred()) clusterResourceClientFactory := &factory.MemoryResourceClientFactory{ Cache: memory.NewInMemoryResourceCache(), } - clusterResourceClient, err := testing_solo_io.NewClusterResourceClient(clusterResourceClientFactory) + clusterResourceClient, err := testing_solo_io_kubernetes.NewClusterResourceClient(clusterResourceClientFactory) Expect(err).NotTo(HaveOccurred()) mockCustomTypeClientFactory := &factory.MemoryResourceClientFactory{ Cache: memory.NewInMemoryResourceCache(), } - mockCustomTypeClient, err := NewMockCustomTypeClient(mockCustomTypeClientFactory) + mockCustomTypeClient, err := github_com_solo_io_solo_kit_test_mocks_api_v1_customtype.NewMockCustomTypeClient(mockCustomTypeClientFactory) Expect(err).NotTo(HaveOccurred()) podClientFactory := &factory.MemoryResourceClientFactory{ @@ -67,15 +68,15 @@ var _ = 
Describe("TestingEventLoop", func() { emitter = NewTestingEmitter(mockResourceClient, fakeResourceClient, anotherMockResourceClient, clusterResourceClient, mockCustomTypeClient, podClient) }) It("runs sync function on a new snapshot", func() { - _, err = emitter.MockResource().Write(testing_solo_io.NewMockResource(namespace, "jerry"), clients.WriteOpts{}) + _, err = emitter.MockResource().Write(testing_solo_io_kubernetes.NewMockResource(namespace, "jerry"), clients.WriteOpts{}) Expect(err).NotTo(HaveOccurred()) - _, err = emitter.FakeResource().Write(testing_solo_io.NewFakeResource(namespace, "jerry"), clients.WriteOpts{}) + _, err = emitter.FakeResource().Write(testing_solo_io_kubernetes.NewFakeResource(namespace, "jerry"), clients.WriteOpts{}) Expect(err).NotTo(HaveOccurred()) - _, err = emitter.AnotherMockResource().Write(testing_solo_io.NewAnotherMockResource(namespace, "jerry"), clients.WriteOpts{}) + _, err = emitter.AnotherMockResource().Write(testing_solo_io_kubernetes.NewAnotherMockResource(namespace, "jerry"), clients.WriteOpts{}) Expect(err).NotTo(HaveOccurred()) - _, err = emitter.ClusterResource().Write(testing_solo_io.NewClusterResource(namespace, "jerry"), clients.WriteOpts{}) + _, err = emitter.ClusterResource().Write(testing_solo_io_kubernetes.NewClusterResource(namespace, "jerry"), clients.WriteOpts{}) Expect(err).NotTo(HaveOccurred()) - _, err = emitter.MockCustomType().Write(NewMockCustomType(namespace, "jerry"), clients.WriteOpts{}) + _, err = emitter.MockCustomType().Write(github_com_solo_io_solo_kit_test_mocks_api_v1_customtype.NewMockCustomType(namespace, "jerry"), clients.WriteOpts{}) Expect(err).NotTo(HaveOccurred()) _, err = emitter.Pod().Write(github_com_solo_io_solo_kit_pkg_api_v1_resources_common_kubernetes.NewPod(namespace, "jerry"), clients.WriteOpts{}) Expect(err).NotTo(HaveOccurred()) diff --git a/test/mockstwo/group/testing_snapshot.sk.go b/test/mockstwo/group/testing_snapshot.sk.go index 87db91fc5..702ba0f2e 100644 --- 
a/test/mockstwo/group/testing_snapshot.sk.go +++ b/test/mockstwo/group/testing_snapshot.sk.go @@ -6,18 +6,19 @@ import ( "fmt" github_com_solo_io_solo_kit_pkg_api_v1_resources_common_kubernetes "github.com/solo-io/solo-kit/pkg/api/v1/resources/common/kubernetes" - testing_solo_io "github.com/solo-io/solo-kit/test/mocks/v1" + github_com_solo_io_solo_kit_test_mocks_api_v1_customtype "github.com/solo-io/solo-kit/test/mocks/api/v1/customtype" + testing_solo_io_kubernetes "github.com/solo-io/solo-kit/test/mocks/v1" "github.com/solo-io/go-utils/hashutils" "go.uber.org/zap" ) type TestingSnapshot struct { - Mocks testing_solo_io.MockResourceList - Fakes testing_solo_io.FakeResourceList - Anothermockresources testing_solo_io.AnotherMockResourceList - Clusterresources testing_solo_io.ClusterResourceList - Mcts MockCustomTypeList + Mocks testing_solo_io_kubernetes.MockResourceList + Fakes testing_solo_io_kubernetes.FakeResourceList + Anothermockresources testing_solo_io_kubernetes.AnotherMockResourceList + Clusterresources testing_solo_io_kubernetes.ClusterResourceList + Mcts github_com_solo_io_solo_kit_test_mocks_api_v1_customtype.MockCustomTypeList Pods github_com_solo_io_solo_kit_pkg_api_v1_resources_common_kubernetes.PodList } diff --git a/test/mockstwo/group/testing_snapshot_emitter.sk.go b/test/mockstwo/group/testing_snapshot_emitter.sk.go index 9a55d827a..4faefe20d 100644 --- a/test/mockstwo/group/testing_snapshot_emitter.sk.go +++ b/test/mockstwo/group/testing_snapshot_emitter.sk.go @@ -7,7 +7,8 @@ import ( "time" github_com_solo_io_solo_kit_pkg_api_v1_resources_common_kubernetes "github.com/solo-io/solo-kit/pkg/api/v1/resources/common/kubernetes" - testing_solo_io "github.com/solo-io/solo-kit/test/mocks/v1" + github_com_solo_io_solo_kit_test_mocks_api_v1_customtype "github.com/solo-io/solo-kit/test/mocks/api/v1/customtype" + testing_solo_io_kubernetes "github.com/solo-io/solo-kit/test/mocks/v1" "go.opencensus.io/stats" "go.opencensus.io/stats/view" @@ -44,20 +45,20 
@@ func init() { type TestingEmitter interface { Register() error - MockResource() testing_solo_io.MockResourceClient - FakeResource() testing_solo_io.FakeResourceClient - AnotherMockResource() testing_solo_io.AnotherMockResourceClient - ClusterResource() testing_solo_io.ClusterResourceClient - MockCustomType() MockCustomTypeClient + MockResource() testing_solo_io_kubernetes.MockResourceClient + FakeResource() testing_solo_io_kubernetes.FakeResourceClient + AnotherMockResource() testing_solo_io_kubernetes.AnotherMockResourceClient + ClusterResource() testing_solo_io_kubernetes.ClusterResourceClient + MockCustomType() github_com_solo_io_solo_kit_test_mocks_api_v1_customtype.MockCustomTypeClient Pod() github_com_solo_io_solo_kit_pkg_api_v1_resources_common_kubernetes.PodClient Snapshots(watchNamespaces []string, opts clients.WatchOpts) (<-chan *TestingSnapshot, <-chan error, error) } -func NewTestingEmitter(mockResourceClient testing_solo_io.MockResourceClient, fakeResourceClient testing_solo_io.FakeResourceClient, anotherMockResourceClient testing_solo_io.AnotherMockResourceClient, clusterResourceClient testing_solo_io.ClusterResourceClient, mockCustomTypeClient MockCustomTypeClient, podClient github_com_solo_io_solo_kit_pkg_api_v1_resources_common_kubernetes.PodClient) TestingEmitter { +func NewTestingEmitter(mockResourceClient testing_solo_io_kubernetes.MockResourceClient, fakeResourceClient testing_solo_io_kubernetes.FakeResourceClient, anotherMockResourceClient testing_solo_io_kubernetes.AnotherMockResourceClient, clusterResourceClient testing_solo_io_kubernetes.ClusterResourceClient, mockCustomTypeClient github_com_solo_io_solo_kit_test_mocks_api_v1_customtype.MockCustomTypeClient, podClient github_com_solo_io_solo_kit_pkg_api_v1_resources_common_kubernetes.PodClient) TestingEmitter { return NewTestingEmitterWithEmit(mockResourceClient, fakeResourceClient, anotherMockResourceClient, clusterResourceClient, mockCustomTypeClient, podClient, make(chan struct{})) } 
-func NewTestingEmitterWithEmit(mockResourceClient testing_solo_io.MockResourceClient, fakeResourceClient testing_solo_io.FakeResourceClient, anotherMockResourceClient testing_solo_io.AnotherMockResourceClient, clusterResourceClient testing_solo_io.ClusterResourceClient, mockCustomTypeClient MockCustomTypeClient, podClient github_com_solo_io_solo_kit_pkg_api_v1_resources_common_kubernetes.PodClient, emit <-chan struct{}) TestingEmitter { +func NewTestingEmitterWithEmit(mockResourceClient testing_solo_io_kubernetes.MockResourceClient, fakeResourceClient testing_solo_io_kubernetes.FakeResourceClient, anotherMockResourceClient testing_solo_io_kubernetes.AnotherMockResourceClient, clusterResourceClient testing_solo_io_kubernetes.ClusterResourceClient, mockCustomTypeClient github_com_solo_io_solo_kit_test_mocks_api_v1_customtype.MockCustomTypeClient, podClient github_com_solo_io_solo_kit_pkg_api_v1_resources_common_kubernetes.PodClient, emit <-chan struct{}) TestingEmitter { return &testingEmitter{ mockResource: mockResourceClient, fakeResource: fakeResourceClient, @@ -71,11 +72,11 @@ func NewTestingEmitterWithEmit(mockResourceClient testing_solo_io.MockResourceCl type testingEmitter struct { forceEmit <-chan struct{} - mockResource testing_solo_io.MockResourceClient - fakeResource testing_solo_io.FakeResourceClient - anotherMockResource testing_solo_io.AnotherMockResourceClient - clusterResource testing_solo_io.ClusterResourceClient - mockCustomType MockCustomTypeClient + mockResource testing_solo_io_kubernetes.MockResourceClient + fakeResource testing_solo_io_kubernetes.FakeResourceClient + anotherMockResource testing_solo_io_kubernetes.AnotherMockResourceClient + clusterResource testing_solo_io_kubernetes.ClusterResourceClient + mockCustomType github_com_solo_io_solo_kit_test_mocks_api_v1_customtype.MockCustomTypeClient pod github_com_solo_io_solo_kit_pkg_api_v1_resources_common_kubernetes.PodClient } @@ -101,23 +102,23 @@ func (c *testingEmitter) Register() error { 
return nil } -func (c *testingEmitter) MockResource() testing_solo_io.MockResourceClient { +func (c *testingEmitter) MockResource() testing_solo_io_kubernetes.MockResourceClient { return c.mockResource } -func (c *testingEmitter) FakeResource() testing_solo_io.FakeResourceClient { +func (c *testingEmitter) FakeResource() testing_solo_io_kubernetes.FakeResourceClient { return c.fakeResource } -func (c *testingEmitter) AnotherMockResource() testing_solo_io.AnotherMockResourceClient { +func (c *testingEmitter) AnotherMockResource() testing_solo_io_kubernetes.AnotherMockResourceClient { return c.anotherMockResource } -func (c *testingEmitter) ClusterResource() testing_solo_io.ClusterResourceClient { +func (c *testingEmitter) ClusterResource() testing_solo_io_kubernetes.ClusterResourceClient { return c.clusterResource } -func (c *testingEmitter) MockCustomType() MockCustomTypeClient { +func (c *testingEmitter) MockCustomType() github_com_solo_io_solo_kit_test_mocks_api_v1_customtype.MockCustomTypeClient { return c.mockCustomType } @@ -143,26 +144,26 @@ func (c *testingEmitter) Snapshots(watchNamespaces []string, opts clients.WatchO ctx := opts.Ctx /* Create channel for MockResource */ type mockResourceListWithNamespace struct { - list testing_solo_io.MockResourceList + list testing_solo_io_kubernetes.MockResourceList namespace string } mockResourceChan := make(chan mockResourceListWithNamespace) /* Create channel for FakeResource */ type fakeResourceListWithNamespace struct { - list testing_solo_io.FakeResourceList + list testing_solo_io_kubernetes.FakeResourceList namespace string } fakeResourceChan := make(chan fakeResourceListWithNamespace) /* Create channel for AnotherMockResource */ type anotherMockResourceListWithNamespace struct { - list testing_solo_io.AnotherMockResourceList + list testing_solo_io_kubernetes.AnotherMockResourceList namespace string } anotherMockResourceChan := make(chan anotherMockResourceListWithNamespace) /* Create channel for ClusterResource 
*/ /* Create channel for MockCustomType */ type mockCustomTypeListWithNamespace struct { - list MockCustomTypeList + list github_com_solo_io_solo_kit_test_mocks_api_v1_customtype.MockCustomTypeList namespace string } mockCustomTypeChan := make(chan mockCustomTypeListWithNamespace) @@ -297,10 +298,10 @@ func (c *testingEmitter) Snapshots(watchNamespaces []string, opts clients.WatchO sentSnapshot := currentSnapshot.Clone() snapshots <- &sentSnapshot } - mocksByNamespace := make(map[string]testing_solo_io.MockResourceList) - fakesByNamespace := make(map[string]testing_solo_io.FakeResourceList) - anothermockresourcesByNamespace := make(map[string]testing_solo_io.AnotherMockResourceList) - mctsByNamespace := make(map[string]MockCustomTypeList) + mocksByNamespace := make(map[string]testing_solo_io_kubernetes.MockResourceList) + fakesByNamespace := make(map[string]testing_solo_io_kubernetes.FakeResourceList) + anothermockresourcesByNamespace := make(map[string]testing_solo_io_kubernetes.AnotherMockResourceList) + mctsByNamespace := make(map[string]github_com_solo_io_solo_kit_test_mocks_api_v1_customtype.MockCustomTypeList) podsByNamespace := make(map[string]github_com_solo_io_solo_kit_pkg_api_v1_resources_common_kubernetes.PodList) for { @@ -324,7 +325,7 @@ func (c *testingEmitter) Snapshots(watchNamespaces []string, opts clients.WatchO // merge lists by namespace mocksByNamespace[namespace] = mockResourceNamespacedList.list - var mockResourceList testing_solo_io.MockResourceList + var mockResourceList testing_solo_io_kubernetes.MockResourceList for _, mocks := range mocksByNamespace { mockResourceList = append(mockResourceList, mocks...) 
} @@ -336,7 +337,7 @@ func (c *testingEmitter) Snapshots(watchNamespaces []string, opts clients.WatchO // merge lists by namespace fakesByNamespace[namespace] = fakeResourceNamespacedList.list - var fakeResourceList testing_solo_io.FakeResourceList + var fakeResourceList testing_solo_io_kubernetes.FakeResourceList for _, fakes := range fakesByNamespace { fakeResourceList = append(fakeResourceList, fakes...) } @@ -348,7 +349,7 @@ func (c *testingEmitter) Snapshots(watchNamespaces []string, opts clients.WatchO // merge lists by namespace anothermockresourcesByNamespace[namespace] = anotherMockResourceNamespacedList.list - var anotherMockResourceList testing_solo_io.AnotherMockResourceList + var anotherMockResourceList testing_solo_io_kubernetes.AnotherMockResourceList for _, anothermockresources := range anothermockresourcesByNamespace { anotherMockResourceList = append(anotherMockResourceList, anothermockresources...) } @@ -363,7 +364,7 @@ func (c *testingEmitter) Snapshots(watchNamespaces []string, opts clients.WatchO // merge lists by namespace mctsByNamespace[namespace] = mockCustomTypeNamespacedList.list - var mockCustomTypeList MockCustomTypeList + var mockCustomTypeList github_com_solo_io_solo_kit_test_mocks_api_v1_customtype.MockCustomTypeList for _, mcts := range mctsByNamespace { mockCustomTypeList = append(mockCustomTypeList, mcts...) 
} diff --git a/test/mockstwo/group/testing_snapshot_emitter_test.go b/test/mockstwo/group/testing_snapshot_emitter_test.go index 1ddab63e7..4a3c76255 100644 --- a/test/mockstwo/group/testing_snapshot_emitter_test.go +++ b/test/mockstwo/group/testing_snapshot_emitter_test.go @@ -10,7 +10,8 @@ import ( "time" github_com_solo_io_solo_kit_pkg_api_v1_resources_common_kubernetes "github.com/solo-io/solo-kit/pkg/api/v1/resources/common/kubernetes" - testing_solo_io "github.com/solo-io/solo-kit/test/mocks/v1" + github_com_solo_io_solo_kit_test_mocks_api_v1_customtype "github.com/solo-io/solo-kit/test/mocks/api/v1/customtype" + testing_solo_io_kubernetes "github.com/solo-io/solo-kit/test/mocks/v1" . "github.com/onsi/ginkgo" . "github.com/onsi/gomega" @@ -43,11 +44,11 @@ var _ = Describe("GroupEmitter", func() { cfg *rest.Config kube kubernetes.Interface emitter TestingEmitter - mockResourceClient testing_solo_io.MockResourceClient - fakeResourceClient testing_solo_io.FakeResourceClient - anotherMockResourceClient testing_solo_io.AnotherMockResourceClient - clusterResourceClient testing_solo_io.ClusterResourceClient - mockCustomTypeClient MockCustomTypeClient + mockResourceClient testing_solo_io_kubernetes.MockResourceClient + fakeResourceClient testing_solo_io_kubernetes.FakeResourceClient + anotherMockResourceClient testing_solo_io_kubernetes.AnotherMockResourceClient + clusterResourceClient testing_solo_io_kubernetes.ClusterResourceClient + mockCustomTypeClient github_com_solo_io_solo_kit_test_mocks_api_v1_customtype.MockCustomTypeClient podClient github_com_solo_io_solo_kit_pkg_api_v1_resources_common_kubernetes.PodClient ) @@ -61,46 +62,46 @@ var _ = Describe("GroupEmitter", func() { Expect(err).NotTo(HaveOccurred()) // MockResource Constructor mockResourceClientFactory := &factory.KubeResourceClientFactory{ - Crd: testing_solo_io.MockResourceCrd, + Crd: testing_solo_io_kubernetes.MockResourceCrd, Cfg: cfg, SharedCache: kuberc.NewKubeCache(context.TODO()), } - 
mockResourceClient, err = testing_solo_io.NewMockResourceClient(mockResourceClientFactory) + mockResourceClient, err = testing_solo_io_kubernetes.NewMockResourceClient(mockResourceClientFactory) Expect(err).NotTo(HaveOccurred()) // FakeResource Constructor fakeResourceClientFactory := &factory.KubeResourceClientFactory{ - Crd: testing_solo_io.FakeResourceCrd, + Crd: testing_solo_io_kubernetes.FakeResourceCrd, Cfg: cfg, SharedCache: kuberc.NewKubeCache(context.TODO()), } - fakeResourceClient, err = testing_solo_io.NewFakeResourceClient(fakeResourceClientFactory) + fakeResourceClient, err = testing_solo_io_kubernetes.NewFakeResourceClient(fakeResourceClientFactory) Expect(err).NotTo(HaveOccurred()) // AnotherMockResource Constructor anotherMockResourceClientFactory := &factory.KubeResourceClientFactory{ - Crd: testing_solo_io.AnotherMockResourceCrd, + Crd: testing_solo_io_kubernetes.AnotherMockResourceCrd, Cfg: cfg, SharedCache: kuberc.NewKubeCache(context.TODO()), } - anotherMockResourceClient, err = testing_solo_io.NewAnotherMockResourceClient(anotherMockResourceClientFactory) + anotherMockResourceClient, err = testing_solo_io_kubernetes.NewAnotherMockResourceClient(anotherMockResourceClientFactory) Expect(err).NotTo(HaveOccurred()) // ClusterResource Constructor clusterResourceClientFactory := &factory.KubeResourceClientFactory{ - Crd: testing_solo_io.ClusterResourceCrd, + Crd: testing_solo_io_kubernetes.ClusterResourceCrd, Cfg: cfg, SharedCache: kuberc.NewKubeCache(context.TODO()), } - clusterResourceClient, err = testing_solo_io.NewClusterResourceClient(clusterResourceClientFactory) + clusterResourceClient, err = testing_solo_io_kubernetes.NewClusterResourceClient(clusterResourceClientFactory) Expect(err).NotTo(HaveOccurred()) // MockCustomType Constructor mockCustomTypeClientFactory := &factory.MemoryResourceClientFactory{ Cache: memory.NewInMemoryResourceCache(), } - mockCustomTypeClient, err = NewMockCustomTypeClient(mockCustomTypeClientFactory) + 
mockCustomTypeClient, err = github_com_solo_io_solo_kit_test_mocks_api_v1_customtype.NewMockCustomTypeClient(mockCustomTypeClientFactory) Expect(err).NotTo(HaveOccurred()) // Pod Constructor podClientFactory := &factory.MemoryResourceClientFactory{ @@ -134,7 +135,7 @@ var _ = Describe("GroupEmitter", func() { MockResource */ - assertSnapshotMocks := func(expectMocks testing_solo_io.MockResourceList, unexpectMocks testing_solo_io.MockResourceList) { + assertSnapshotMocks := func(expectMocks testing_solo_io_kubernetes.MockResourceList, unexpectMocks testing_solo_io_kubernetes.MockResourceList) { drain: for { select { @@ -160,38 +161,38 @@ var _ = Describe("GroupEmitter", func() { } } } - mockResource1a, err := mockResourceClient.Write(testing_solo_io.NewMockResource(namespace1, name1), clients.WriteOpts{Ctx: ctx}) + mockResource1a, err := mockResourceClient.Write(testing_solo_io_kubernetes.NewMockResource(namespace1, name1), clients.WriteOpts{Ctx: ctx}) Expect(err).NotTo(HaveOccurred()) - mockResource1b, err := mockResourceClient.Write(testing_solo_io.NewMockResource(namespace2, name1), clients.WriteOpts{Ctx: ctx}) + mockResource1b, err := mockResourceClient.Write(testing_solo_io_kubernetes.NewMockResource(namespace2, name1), clients.WriteOpts{Ctx: ctx}) Expect(err).NotTo(HaveOccurred()) - assertSnapshotMocks(testing_solo_io.MockResourceList{mockResource1a, mockResource1b}, nil) - mockResource2a, err := mockResourceClient.Write(testing_solo_io.NewMockResource(namespace1, name2), clients.WriteOpts{Ctx: ctx}) + assertSnapshotMocks(testing_solo_io_kubernetes.MockResourceList{mockResource1a, mockResource1b}, nil) + mockResource2a, err := mockResourceClient.Write(testing_solo_io_kubernetes.NewMockResource(namespace1, name2), clients.WriteOpts{Ctx: ctx}) Expect(err).NotTo(HaveOccurred()) - mockResource2b, err := mockResourceClient.Write(testing_solo_io.NewMockResource(namespace2, name2), clients.WriteOpts{Ctx: ctx}) + mockResource2b, err := 
mockResourceClient.Write(testing_solo_io_kubernetes.NewMockResource(namespace2, name2), clients.WriteOpts{Ctx: ctx}) Expect(err).NotTo(HaveOccurred()) - assertSnapshotMocks(testing_solo_io.MockResourceList{mockResource1a, mockResource1b, mockResource2a, mockResource2b}, nil) + assertSnapshotMocks(testing_solo_io_kubernetes.MockResourceList{mockResource1a, mockResource1b, mockResource2a, mockResource2b}, nil) err = mockResourceClient.Delete(mockResource2a.GetMetadata().Namespace, mockResource2a.GetMetadata().Name, clients.DeleteOpts{Ctx: ctx}) Expect(err).NotTo(HaveOccurred()) err = mockResourceClient.Delete(mockResource2b.GetMetadata().Namespace, mockResource2b.GetMetadata().Name, clients.DeleteOpts{Ctx: ctx}) Expect(err).NotTo(HaveOccurred()) - assertSnapshotMocks(testing_solo_io.MockResourceList{mockResource1a, mockResource1b}, testing_solo_io.MockResourceList{mockResource2a, mockResource2b}) + assertSnapshotMocks(testing_solo_io_kubernetes.MockResourceList{mockResource1a, mockResource1b}, testing_solo_io_kubernetes.MockResourceList{mockResource2a, mockResource2b}) err = mockResourceClient.Delete(mockResource1a.GetMetadata().Namespace, mockResource1a.GetMetadata().Name, clients.DeleteOpts{Ctx: ctx}) Expect(err).NotTo(HaveOccurred()) err = mockResourceClient.Delete(mockResource1b.GetMetadata().Namespace, mockResource1b.GetMetadata().Name, clients.DeleteOpts{Ctx: ctx}) Expect(err).NotTo(HaveOccurred()) - assertSnapshotMocks(nil, testing_solo_io.MockResourceList{mockResource1a, mockResource1b, mockResource2a, mockResource2b}) + assertSnapshotMocks(nil, testing_solo_io_kubernetes.MockResourceList{mockResource1a, mockResource1b, mockResource2a, mockResource2b}) /* FakeResource */ - assertSnapshotFakes := func(expectFakes testing_solo_io.FakeResourceList, unexpectFakes testing_solo_io.FakeResourceList) { + assertSnapshotFakes := func(expectFakes testing_solo_io_kubernetes.FakeResourceList, unexpectFakes testing_solo_io_kubernetes.FakeResourceList) { drain: for { select 
{ @@ -217,38 +218,38 @@ var _ = Describe("GroupEmitter", func() { } } } - fakeResource1a, err := fakeResourceClient.Write(testing_solo_io.NewFakeResource(namespace1, name1), clients.WriteOpts{Ctx: ctx}) + fakeResource1a, err := fakeResourceClient.Write(testing_solo_io_kubernetes.NewFakeResource(namespace1, name1), clients.WriteOpts{Ctx: ctx}) Expect(err).NotTo(HaveOccurred()) - fakeResource1b, err := fakeResourceClient.Write(testing_solo_io.NewFakeResource(namespace2, name1), clients.WriteOpts{Ctx: ctx}) + fakeResource1b, err := fakeResourceClient.Write(testing_solo_io_kubernetes.NewFakeResource(namespace2, name1), clients.WriteOpts{Ctx: ctx}) Expect(err).NotTo(HaveOccurred()) - assertSnapshotFakes(testing_solo_io.FakeResourceList{fakeResource1a, fakeResource1b}, nil) - fakeResource2a, err := fakeResourceClient.Write(testing_solo_io.NewFakeResource(namespace1, name2), clients.WriteOpts{Ctx: ctx}) + assertSnapshotFakes(testing_solo_io_kubernetes.FakeResourceList{fakeResource1a, fakeResource1b}, nil) + fakeResource2a, err := fakeResourceClient.Write(testing_solo_io_kubernetes.NewFakeResource(namespace1, name2), clients.WriteOpts{Ctx: ctx}) Expect(err).NotTo(HaveOccurred()) - fakeResource2b, err := fakeResourceClient.Write(testing_solo_io.NewFakeResource(namespace2, name2), clients.WriteOpts{Ctx: ctx}) + fakeResource2b, err := fakeResourceClient.Write(testing_solo_io_kubernetes.NewFakeResource(namespace2, name2), clients.WriteOpts{Ctx: ctx}) Expect(err).NotTo(HaveOccurred()) - assertSnapshotFakes(testing_solo_io.FakeResourceList{fakeResource1a, fakeResource1b, fakeResource2a, fakeResource2b}, nil) + assertSnapshotFakes(testing_solo_io_kubernetes.FakeResourceList{fakeResource1a, fakeResource1b, fakeResource2a, fakeResource2b}, nil) err = fakeResourceClient.Delete(fakeResource2a.GetMetadata().Namespace, fakeResource2a.GetMetadata().Name, clients.DeleteOpts{Ctx: ctx}) Expect(err).NotTo(HaveOccurred()) err = 
fakeResourceClient.Delete(fakeResource2b.GetMetadata().Namespace, fakeResource2b.GetMetadata().Name, clients.DeleteOpts{Ctx: ctx}) Expect(err).NotTo(HaveOccurred()) - assertSnapshotFakes(testing_solo_io.FakeResourceList{fakeResource1a, fakeResource1b}, testing_solo_io.FakeResourceList{fakeResource2a, fakeResource2b}) + assertSnapshotFakes(testing_solo_io_kubernetes.FakeResourceList{fakeResource1a, fakeResource1b}, testing_solo_io_kubernetes.FakeResourceList{fakeResource2a, fakeResource2b}) err = fakeResourceClient.Delete(fakeResource1a.GetMetadata().Namespace, fakeResource1a.GetMetadata().Name, clients.DeleteOpts{Ctx: ctx}) Expect(err).NotTo(HaveOccurred()) err = fakeResourceClient.Delete(fakeResource1b.GetMetadata().Namespace, fakeResource1b.GetMetadata().Name, clients.DeleteOpts{Ctx: ctx}) Expect(err).NotTo(HaveOccurred()) - assertSnapshotFakes(nil, testing_solo_io.FakeResourceList{fakeResource1a, fakeResource1b, fakeResource2a, fakeResource2b}) + assertSnapshotFakes(nil, testing_solo_io_kubernetes.FakeResourceList{fakeResource1a, fakeResource1b, fakeResource2a, fakeResource2b}) /* AnotherMockResource */ - assertSnapshotAnothermockresources := func(expectAnothermockresources testing_solo_io.AnotherMockResourceList, unexpectAnothermockresources testing_solo_io.AnotherMockResourceList) { + assertSnapshotAnothermockresources := func(expectAnothermockresources testing_solo_io_kubernetes.AnotherMockResourceList, unexpectAnothermockresources testing_solo_io_kubernetes.AnotherMockResourceList) { drain: for { select { @@ -274,38 +275,38 @@ var _ = Describe("GroupEmitter", func() { } } } - anotherMockResource1a, err := anotherMockResourceClient.Write(testing_solo_io.NewAnotherMockResource(namespace1, name1), clients.WriteOpts{Ctx: ctx}) + anotherMockResource1a, err := anotherMockResourceClient.Write(testing_solo_io_kubernetes.NewAnotherMockResource(namespace1, name1), clients.WriteOpts{Ctx: ctx}) Expect(err).NotTo(HaveOccurred()) - anotherMockResource1b, err := 
anotherMockResourceClient.Write(testing_solo_io.NewAnotherMockResource(namespace2, name1), clients.WriteOpts{Ctx: ctx}) + anotherMockResource1b, err := anotherMockResourceClient.Write(testing_solo_io_kubernetes.NewAnotherMockResource(namespace2, name1), clients.WriteOpts{Ctx: ctx}) Expect(err).NotTo(HaveOccurred()) - assertSnapshotAnothermockresources(testing_solo_io.AnotherMockResourceList{anotherMockResource1a, anotherMockResource1b}, nil) - anotherMockResource2a, err := anotherMockResourceClient.Write(testing_solo_io.NewAnotherMockResource(namespace1, name2), clients.WriteOpts{Ctx: ctx}) + assertSnapshotAnothermockresources(testing_solo_io_kubernetes.AnotherMockResourceList{anotherMockResource1a, anotherMockResource1b}, nil) + anotherMockResource2a, err := anotherMockResourceClient.Write(testing_solo_io_kubernetes.NewAnotherMockResource(namespace1, name2), clients.WriteOpts{Ctx: ctx}) Expect(err).NotTo(HaveOccurred()) - anotherMockResource2b, err := anotherMockResourceClient.Write(testing_solo_io.NewAnotherMockResource(namespace2, name2), clients.WriteOpts{Ctx: ctx}) + anotherMockResource2b, err := anotherMockResourceClient.Write(testing_solo_io_kubernetes.NewAnotherMockResource(namespace2, name2), clients.WriteOpts{Ctx: ctx}) Expect(err).NotTo(HaveOccurred()) - assertSnapshotAnothermockresources(testing_solo_io.AnotherMockResourceList{anotherMockResource1a, anotherMockResource1b, anotherMockResource2a, anotherMockResource2b}, nil) + assertSnapshotAnothermockresources(testing_solo_io_kubernetes.AnotherMockResourceList{anotherMockResource1a, anotherMockResource1b, anotherMockResource2a, anotherMockResource2b}, nil) err = anotherMockResourceClient.Delete(anotherMockResource2a.GetMetadata().Namespace, anotherMockResource2a.GetMetadata().Name, clients.DeleteOpts{Ctx: ctx}) Expect(err).NotTo(HaveOccurred()) err = anotherMockResourceClient.Delete(anotherMockResource2b.GetMetadata().Namespace, anotherMockResource2b.GetMetadata().Name, clients.DeleteOpts{Ctx: ctx}) 
Expect(err).NotTo(HaveOccurred()) - assertSnapshotAnothermockresources(testing_solo_io.AnotherMockResourceList{anotherMockResource1a, anotherMockResource1b}, testing_solo_io.AnotherMockResourceList{anotherMockResource2a, anotherMockResource2b}) + assertSnapshotAnothermockresources(testing_solo_io_kubernetes.AnotherMockResourceList{anotherMockResource1a, anotherMockResource1b}, testing_solo_io_kubernetes.AnotherMockResourceList{anotherMockResource2a, anotherMockResource2b}) err = anotherMockResourceClient.Delete(anotherMockResource1a.GetMetadata().Namespace, anotherMockResource1a.GetMetadata().Name, clients.DeleteOpts{Ctx: ctx}) Expect(err).NotTo(HaveOccurred()) err = anotherMockResourceClient.Delete(anotherMockResource1b.GetMetadata().Namespace, anotherMockResource1b.GetMetadata().Name, clients.DeleteOpts{Ctx: ctx}) Expect(err).NotTo(HaveOccurred()) - assertSnapshotAnothermockresources(nil, testing_solo_io.AnotherMockResourceList{anotherMockResource1a, anotherMockResource1b, anotherMockResource2a, anotherMockResource2b}) + assertSnapshotAnothermockresources(nil, testing_solo_io_kubernetes.AnotherMockResourceList{anotherMockResource1a, anotherMockResource1b, anotherMockResource2a, anotherMockResource2b}) /* ClusterResource */ - assertSnapshotClusterresources := func(expectClusterresources testing_solo_io.ClusterResourceList, unexpectClusterresources testing_solo_io.ClusterResourceList) { + assertSnapshotClusterresources := func(expectClusterresources testing_solo_io_kubernetes.ClusterResourceList, unexpectClusterresources testing_solo_io_kubernetes.ClusterResourceList) { drain: for { select { @@ -329,30 +330,30 @@ var _ = Describe("GroupEmitter", func() { } } } - clusterResource1a, err := clusterResourceClient.Write(testing_solo_io.NewClusterResource(namespace1, name1), clients.WriteOpts{Ctx: ctx}) + clusterResource1a, err := clusterResourceClient.Write(testing_solo_io_kubernetes.NewClusterResource(namespace1, name1), clients.WriteOpts{Ctx: ctx}) 
Expect(err).NotTo(HaveOccurred()) - assertSnapshotClusterresources(testing_solo_io.ClusterResourceList{clusterResource1a}, nil) - clusterResource2a, err := clusterResourceClient.Write(testing_solo_io.NewClusterResource(namespace1, name2), clients.WriteOpts{Ctx: ctx}) + assertSnapshotClusterresources(testing_solo_io_kubernetes.ClusterResourceList{clusterResource1a}, nil) + clusterResource2a, err := clusterResourceClient.Write(testing_solo_io_kubernetes.NewClusterResource(namespace1, name2), clients.WriteOpts{Ctx: ctx}) Expect(err).NotTo(HaveOccurred()) - assertSnapshotClusterresources(testing_solo_io.ClusterResourceList{clusterResource1a, clusterResource2a}, nil) + assertSnapshotClusterresources(testing_solo_io_kubernetes.ClusterResourceList{clusterResource1a, clusterResource2a}, nil) err = clusterResourceClient.Delete(clusterResource2a.GetMetadata().Name, clients.DeleteOpts{Ctx: ctx}) Expect(err).NotTo(HaveOccurred()) - assertSnapshotClusterresources(testing_solo_io.ClusterResourceList{clusterResource1a}, testing_solo_io.ClusterResourceList{clusterResource2a}) + assertSnapshotClusterresources(testing_solo_io_kubernetes.ClusterResourceList{clusterResource1a}, testing_solo_io_kubernetes.ClusterResourceList{clusterResource2a}) err = clusterResourceClient.Delete(clusterResource1a.GetMetadata().Name, clients.DeleteOpts{Ctx: ctx}) Expect(err).NotTo(HaveOccurred()) - assertSnapshotClusterresources(nil, testing_solo_io.ClusterResourceList{clusterResource1a, clusterResource2a}) + assertSnapshotClusterresources(nil, testing_solo_io_kubernetes.ClusterResourceList{clusterResource1a, clusterResource2a}) /* MockCustomType */ - assertSnapshotmcts := func(expectmcts MockCustomTypeList, unexpectmcts MockCustomTypeList) { + assertSnapshotmcts := func(expectmcts github_com_solo_io_solo_kit_test_mocks_api_v1_customtype.MockCustomTypeList, unexpectmcts github_com_solo_io_solo_kit_test_mocks_api_v1_customtype.MockCustomTypeList) { drain: for { select { @@ -378,32 +379,32 @@ var _ = 
Describe("GroupEmitter", func() { } } } - mockCustomType1a, err := mockCustomTypeClient.Write(NewMockCustomType(namespace1, name1), clients.WriteOpts{Ctx: ctx}) + mockCustomType1a, err := mockCustomTypeClient.Write(github_com_solo_io_solo_kit_test_mocks_api_v1_customtype.NewMockCustomType(namespace1, name1), clients.WriteOpts{Ctx: ctx}) Expect(err).NotTo(HaveOccurred()) - mockCustomType1b, err := mockCustomTypeClient.Write(NewMockCustomType(namespace2, name1), clients.WriteOpts{Ctx: ctx}) + mockCustomType1b, err := mockCustomTypeClient.Write(github_com_solo_io_solo_kit_test_mocks_api_v1_customtype.NewMockCustomType(namespace2, name1), clients.WriteOpts{Ctx: ctx}) Expect(err).NotTo(HaveOccurred()) - assertSnapshotmcts(MockCustomTypeList{mockCustomType1a, mockCustomType1b}, nil) - mockCustomType2a, err := mockCustomTypeClient.Write(NewMockCustomType(namespace1, name2), clients.WriteOpts{Ctx: ctx}) + assertSnapshotmcts(github_com_solo_io_solo_kit_test_mocks_api_v1_customtype.MockCustomTypeList{mockCustomType1a, mockCustomType1b}, nil) + mockCustomType2a, err := mockCustomTypeClient.Write(github_com_solo_io_solo_kit_test_mocks_api_v1_customtype.NewMockCustomType(namespace1, name2), clients.WriteOpts{Ctx: ctx}) Expect(err).NotTo(HaveOccurred()) - mockCustomType2b, err := mockCustomTypeClient.Write(NewMockCustomType(namespace2, name2), clients.WriteOpts{Ctx: ctx}) + mockCustomType2b, err := mockCustomTypeClient.Write(github_com_solo_io_solo_kit_test_mocks_api_v1_customtype.NewMockCustomType(namespace2, name2), clients.WriteOpts{Ctx: ctx}) Expect(err).NotTo(HaveOccurred()) - assertSnapshotmcts(MockCustomTypeList{mockCustomType1a, mockCustomType1b, mockCustomType2a, mockCustomType2b}, nil) + assertSnapshotmcts(github_com_solo_io_solo_kit_test_mocks_api_v1_customtype.MockCustomTypeList{mockCustomType1a, mockCustomType1b, mockCustomType2a, mockCustomType2b}, nil) err = mockCustomTypeClient.Delete(mockCustomType2a.GetMetadata().Namespace, mockCustomType2a.GetMetadata().Name, 
clients.DeleteOpts{Ctx: ctx}) Expect(err).NotTo(HaveOccurred()) err = mockCustomTypeClient.Delete(mockCustomType2b.GetMetadata().Namespace, mockCustomType2b.GetMetadata().Name, clients.DeleteOpts{Ctx: ctx}) Expect(err).NotTo(HaveOccurred()) - assertSnapshotmcts(MockCustomTypeList{mockCustomType1a, mockCustomType1b}, MockCustomTypeList{mockCustomType2a, mockCustomType2b}) + assertSnapshotmcts(github_com_solo_io_solo_kit_test_mocks_api_v1_customtype.MockCustomTypeList{mockCustomType1a, mockCustomType1b}, github_com_solo_io_solo_kit_test_mocks_api_v1_customtype.MockCustomTypeList{mockCustomType2a, mockCustomType2b}) err = mockCustomTypeClient.Delete(mockCustomType1a.GetMetadata().Namespace, mockCustomType1a.GetMetadata().Name, clients.DeleteOpts{Ctx: ctx}) Expect(err).NotTo(HaveOccurred()) err = mockCustomTypeClient.Delete(mockCustomType1b.GetMetadata().Namespace, mockCustomType1b.GetMetadata().Name, clients.DeleteOpts{Ctx: ctx}) Expect(err).NotTo(HaveOccurred()) - assertSnapshotmcts(nil, MockCustomTypeList{mockCustomType1a, mockCustomType1b, mockCustomType2a, mockCustomType2b}) + assertSnapshotmcts(nil, github_com_solo_io_solo_kit_test_mocks_api_v1_customtype.MockCustomTypeList{mockCustomType1a, mockCustomType1b, mockCustomType2a, mockCustomType2b}) /* Pod @@ -479,7 +480,7 @@ var _ = Describe("GroupEmitter", func() { MockResource */ - assertSnapshotMocks := func(expectMocks testing_solo_io.MockResourceList, unexpectMocks testing_solo_io.MockResourceList) { + assertSnapshotMocks := func(expectMocks testing_solo_io_kubernetes.MockResourceList, unexpectMocks testing_solo_io_kubernetes.MockResourceList) { drain: for { select { @@ -505,38 +506,38 @@ var _ = Describe("GroupEmitter", func() { } } } - mockResource1a, err := mockResourceClient.Write(testing_solo_io.NewMockResource(namespace1, name1), clients.WriteOpts{Ctx: ctx}) + mockResource1a, err := mockResourceClient.Write(testing_solo_io_kubernetes.NewMockResource(namespace1, name1), clients.WriteOpts{Ctx: ctx}) 
Expect(err).NotTo(HaveOccurred()) - mockResource1b, err := mockResourceClient.Write(testing_solo_io.NewMockResource(namespace2, name1), clients.WriteOpts{Ctx: ctx}) + mockResource1b, err := mockResourceClient.Write(testing_solo_io_kubernetes.NewMockResource(namespace2, name1), clients.WriteOpts{Ctx: ctx}) Expect(err).NotTo(HaveOccurred()) - assertSnapshotMocks(testing_solo_io.MockResourceList{mockResource1a, mockResource1b}, nil) - mockResource2a, err := mockResourceClient.Write(testing_solo_io.NewMockResource(namespace1, name2), clients.WriteOpts{Ctx: ctx}) + assertSnapshotMocks(testing_solo_io_kubernetes.MockResourceList{mockResource1a, mockResource1b}, nil) + mockResource2a, err := mockResourceClient.Write(testing_solo_io_kubernetes.NewMockResource(namespace1, name2), clients.WriteOpts{Ctx: ctx}) Expect(err).NotTo(HaveOccurred()) - mockResource2b, err := mockResourceClient.Write(testing_solo_io.NewMockResource(namespace2, name2), clients.WriteOpts{Ctx: ctx}) + mockResource2b, err := mockResourceClient.Write(testing_solo_io_kubernetes.NewMockResource(namespace2, name2), clients.WriteOpts{Ctx: ctx}) Expect(err).NotTo(HaveOccurred()) - assertSnapshotMocks(testing_solo_io.MockResourceList{mockResource1a, mockResource1b, mockResource2a, mockResource2b}, nil) + assertSnapshotMocks(testing_solo_io_kubernetes.MockResourceList{mockResource1a, mockResource1b, mockResource2a, mockResource2b}, nil) err = mockResourceClient.Delete(mockResource2a.GetMetadata().Namespace, mockResource2a.GetMetadata().Name, clients.DeleteOpts{Ctx: ctx}) Expect(err).NotTo(HaveOccurred()) err = mockResourceClient.Delete(mockResource2b.GetMetadata().Namespace, mockResource2b.GetMetadata().Name, clients.DeleteOpts{Ctx: ctx}) Expect(err).NotTo(HaveOccurred()) - assertSnapshotMocks(testing_solo_io.MockResourceList{mockResource1a, mockResource1b}, testing_solo_io.MockResourceList{mockResource2a, mockResource2b}) + assertSnapshotMocks(testing_solo_io_kubernetes.MockResourceList{mockResource1a, 
mockResource1b}, testing_solo_io_kubernetes.MockResourceList{mockResource2a, mockResource2b}) err = mockResourceClient.Delete(mockResource1a.GetMetadata().Namespace, mockResource1a.GetMetadata().Name, clients.DeleteOpts{Ctx: ctx}) Expect(err).NotTo(HaveOccurred()) err = mockResourceClient.Delete(mockResource1b.GetMetadata().Namespace, mockResource1b.GetMetadata().Name, clients.DeleteOpts{Ctx: ctx}) Expect(err).NotTo(HaveOccurred()) - assertSnapshotMocks(nil, testing_solo_io.MockResourceList{mockResource1a, mockResource1b, mockResource2a, mockResource2b}) + assertSnapshotMocks(nil, testing_solo_io_kubernetes.MockResourceList{mockResource1a, mockResource1b, mockResource2a, mockResource2b}) /* FakeResource */ - assertSnapshotFakes := func(expectFakes testing_solo_io.FakeResourceList, unexpectFakes testing_solo_io.FakeResourceList) { + assertSnapshotFakes := func(expectFakes testing_solo_io_kubernetes.FakeResourceList, unexpectFakes testing_solo_io_kubernetes.FakeResourceList) { drain: for { select { @@ -562,38 +563,38 @@ var _ = Describe("GroupEmitter", func() { } } } - fakeResource1a, err := fakeResourceClient.Write(testing_solo_io.NewFakeResource(namespace1, name1), clients.WriteOpts{Ctx: ctx}) + fakeResource1a, err := fakeResourceClient.Write(testing_solo_io_kubernetes.NewFakeResource(namespace1, name1), clients.WriteOpts{Ctx: ctx}) Expect(err).NotTo(HaveOccurred()) - fakeResource1b, err := fakeResourceClient.Write(testing_solo_io.NewFakeResource(namespace2, name1), clients.WriteOpts{Ctx: ctx}) + fakeResource1b, err := fakeResourceClient.Write(testing_solo_io_kubernetes.NewFakeResource(namespace2, name1), clients.WriteOpts{Ctx: ctx}) Expect(err).NotTo(HaveOccurred()) - assertSnapshotFakes(testing_solo_io.FakeResourceList{fakeResource1a, fakeResource1b}, nil) - fakeResource2a, err := fakeResourceClient.Write(testing_solo_io.NewFakeResource(namespace1, name2), clients.WriteOpts{Ctx: ctx}) + 
assertSnapshotFakes(testing_solo_io_kubernetes.FakeResourceList{fakeResource1a, fakeResource1b}, nil) + fakeResource2a, err := fakeResourceClient.Write(testing_solo_io_kubernetes.NewFakeResource(namespace1, name2), clients.WriteOpts{Ctx: ctx}) Expect(err).NotTo(HaveOccurred()) - fakeResource2b, err := fakeResourceClient.Write(testing_solo_io.NewFakeResource(namespace2, name2), clients.WriteOpts{Ctx: ctx}) + fakeResource2b, err := fakeResourceClient.Write(testing_solo_io_kubernetes.NewFakeResource(namespace2, name2), clients.WriteOpts{Ctx: ctx}) Expect(err).NotTo(HaveOccurred()) - assertSnapshotFakes(testing_solo_io.FakeResourceList{fakeResource1a, fakeResource1b, fakeResource2a, fakeResource2b}, nil) + assertSnapshotFakes(testing_solo_io_kubernetes.FakeResourceList{fakeResource1a, fakeResource1b, fakeResource2a, fakeResource2b}, nil) err = fakeResourceClient.Delete(fakeResource2a.GetMetadata().Namespace, fakeResource2a.GetMetadata().Name, clients.DeleteOpts{Ctx: ctx}) Expect(err).NotTo(HaveOccurred()) err = fakeResourceClient.Delete(fakeResource2b.GetMetadata().Namespace, fakeResource2b.GetMetadata().Name, clients.DeleteOpts{Ctx: ctx}) Expect(err).NotTo(HaveOccurred()) - assertSnapshotFakes(testing_solo_io.FakeResourceList{fakeResource1a, fakeResource1b}, testing_solo_io.FakeResourceList{fakeResource2a, fakeResource2b}) + assertSnapshotFakes(testing_solo_io_kubernetes.FakeResourceList{fakeResource1a, fakeResource1b}, testing_solo_io_kubernetes.FakeResourceList{fakeResource2a, fakeResource2b}) err = fakeResourceClient.Delete(fakeResource1a.GetMetadata().Namespace, fakeResource1a.GetMetadata().Name, clients.DeleteOpts{Ctx: ctx}) Expect(err).NotTo(HaveOccurred()) err = fakeResourceClient.Delete(fakeResource1b.GetMetadata().Namespace, fakeResource1b.GetMetadata().Name, clients.DeleteOpts{Ctx: ctx}) Expect(err).NotTo(HaveOccurred()) - assertSnapshotFakes(nil, testing_solo_io.FakeResourceList{fakeResource1a, fakeResource1b, fakeResource2a, fakeResource2b}) + 
assertSnapshotFakes(nil, testing_solo_io_kubernetes.FakeResourceList{fakeResource1a, fakeResource1b, fakeResource2a, fakeResource2b}) /* AnotherMockResource */ - assertSnapshotAnothermockresources := func(expectAnothermockresources testing_solo_io.AnotherMockResourceList, unexpectAnothermockresources testing_solo_io.AnotherMockResourceList) { + assertSnapshotAnothermockresources := func(expectAnothermockresources testing_solo_io_kubernetes.AnotherMockResourceList, unexpectAnothermockresources testing_solo_io_kubernetes.AnotherMockResourceList) { drain: for { select { @@ -619,38 +620,38 @@ var _ = Describe("GroupEmitter", func() { } } } - anotherMockResource1a, err := anotherMockResourceClient.Write(testing_solo_io.NewAnotherMockResource(namespace1, name1), clients.WriteOpts{Ctx: ctx}) + anotherMockResource1a, err := anotherMockResourceClient.Write(testing_solo_io_kubernetes.NewAnotherMockResource(namespace1, name1), clients.WriteOpts{Ctx: ctx}) Expect(err).NotTo(HaveOccurred()) - anotherMockResource1b, err := anotherMockResourceClient.Write(testing_solo_io.NewAnotherMockResource(namespace2, name1), clients.WriteOpts{Ctx: ctx}) + anotherMockResource1b, err := anotherMockResourceClient.Write(testing_solo_io_kubernetes.NewAnotherMockResource(namespace2, name1), clients.WriteOpts{Ctx: ctx}) Expect(err).NotTo(HaveOccurred()) - assertSnapshotAnothermockresources(testing_solo_io.AnotherMockResourceList{anotherMockResource1a, anotherMockResource1b}, nil) - anotherMockResource2a, err := anotherMockResourceClient.Write(testing_solo_io.NewAnotherMockResource(namespace1, name2), clients.WriteOpts{Ctx: ctx}) + assertSnapshotAnothermockresources(testing_solo_io_kubernetes.AnotherMockResourceList{anotherMockResource1a, anotherMockResource1b}, nil) + anotherMockResource2a, err := anotherMockResourceClient.Write(testing_solo_io_kubernetes.NewAnotherMockResource(namespace1, name2), clients.WriteOpts{Ctx: ctx}) Expect(err).NotTo(HaveOccurred()) - anotherMockResource2b, err := 
anotherMockResourceClient.Write(testing_solo_io.NewAnotherMockResource(namespace2, name2), clients.WriteOpts{Ctx: ctx}) + anotherMockResource2b, err := anotherMockResourceClient.Write(testing_solo_io_kubernetes.NewAnotherMockResource(namespace2, name2), clients.WriteOpts{Ctx: ctx}) Expect(err).NotTo(HaveOccurred()) - assertSnapshotAnothermockresources(testing_solo_io.AnotherMockResourceList{anotherMockResource1a, anotherMockResource1b, anotherMockResource2a, anotherMockResource2b}, nil) + assertSnapshotAnothermockresources(testing_solo_io_kubernetes.AnotherMockResourceList{anotherMockResource1a, anotherMockResource1b, anotherMockResource2a, anotherMockResource2b}, nil) err = anotherMockResourceClient.Delete(anotherMockResource2a.GetMetadata().Namespace, anotherMockResource2a.GetMetadata().Name, clients.DeleteOpts{Ctx: ctx}) Expect(err).NotTo(HaveOccurred()) err = anotherMockResourceClient.Delete(anotherMockResource2b.GetMetadata().Namespace, anotherMockResource2b.GetMetadata().Name, clients.DeleteOpts{Ctx: ctx}) Expect(err).NotTo(HaveOccurred()) - assertSnapshotAnothermockresources(testing_solo_io.AnotherMockResourceList{anotherMockResource1a, anotherMockResource1b}, testing_solo_io.AnotherMockResourceList{anotherMockResource2a, anotherMockResource2b}) + assertSnapshotAnothermockresources(testing_solo_io_kubernetes.AnotherMockResourceList{anotherMockResource1a, anotherMockResource1b}, testing_solo_io_kubernetes.AnotherMockResourceList{anotherMockResource2a, anotherMockResource2b}) err = anotherMockResourceClient.Delete(anotherMockResource1a.GetMetadata().Namespace, anotherMockResource1a.GetMetadata().Name, clients.DeleteOpts{Ctx: ctx}) Expect(err).NotTo(HaveOccurred()) err = anotherMockResourceClient.Delete(anotherMockResource1b.GetMetadata().Namespace, anotherMockResource1b.GetMetadata().Name, clients.DeleteOpts{Ctx: ctx}) Expect(err).NotTo(HaveOccurred()) - assertSnapshotAnothermockresources(nil, testing_solo_io.AnotherMockResourceList{anotherMockResource1a, 
anotherMockResource1b, anotherMockResource2a, anotherMockResource2b}) + assertSnapshotAnothermockresources(nil, testing_solo_io_kubernetes.AnotherMockResourceList{anotherMockResource1a, anotherMockResource1b, anotherMockResource2a, anotherMockResource2b}) /* ClusterResource */ - assertSnapshotClusterresources := func(expectClusterresources testing_solo_io.ClusterResourceList, unexpectClusterresources testing_solo_io.ClusterResourceList) { + assertSnapshotClusterresources := func(expectClusterresources testing_solo_io_kubernetes.ClusterResourceList, unexpectClusterresources testing_solo_io_kubernetes.ClusterResourceList) { drain: for { select { @@ -674,30 +675,30 @@ var _ = Describe("GroupEmitter", func() { } } } - clusterResource1a, err := clusterResourceClient.Write(testing_solo_io.NewClusterResource(namespace1, name1), clients.WriteOpts{Ctx: ctx}) + clusterResource1a, err := clusterResourceClient.Write(testing_solo_io_kubernetes.NewClusterResource(namespace1, name1), clients.WriteOpts{Ctx: ctx}) Expect(err).NotTo(HaveOccurred()) - assertSnapshotClusterresources(testing_solo_io.ClusterResourceList{clusterResource1a}, nil) - clusterResource2a, err := clusterResourceClient.Write(testing_solo_io.NewClusterResource(namespace1, name2), clients.WriteOpts{Ctx: ctx}) + assertSnapshotClusterresources(testing_solo_io_kubernetes.ClusterResourceList{clusterResource1a}, nil) + clusterResource2a, err := clusterResourceClient.Write(testing_solo_io_kubernetes.NewClusterResource(namespace1, name2), clients.WriteOpts{Ctx: ctx}) Expect(err).NotTo(HaveOccurred()) - assertSnapshotClusterresources(testing_solo_io.ClusterResourceList{clusterResource1a, clusterResource2a}, nil) + assertSnapshotClusterresources(testing_solo_io_kubernetes.ClusterResourceList{clusterResource1a, clusterResource2a}, nil) err = clusterResourceClient.Delete(clusterResource2a.GetMetadata().Name, clients.DeleteOpts{Ctx: ctx}) Expect(err).NotTo(HaveOccurred()) - 
assertSnapshotClusterresources(testing_solo_io.ClusterResourceList{clusterResource1a}, testing_solo_io.ClusterResourceList{clusterResource2a}) + assertSnapshotClusterresources(testing_solo_io_kubernetes.ClusterResourceList{clusterResource1a}, testing_solo_io_kubernetes.ClusterResourceList{clusterResource2a}) err = clusterResourceClient.Delete(clusterResource1a.GetMetadata().Name, clients.DeleteOpts{Ctx: ctx}) Expect(err).NotTo(HaveOccurred()) - assertSnapshotClusterresources(nil, testing_solo_io.ClusterResourceList{clusterResource1a, clusterResource2a}) + assertSnapshotClusterresources(nil, testing_solo_io_kubernetes.ClusterResourceList{clusterResource1a, clusterResource2a}) /* MockCustomType */ - assertSnapshotmcts := func(expectmcts MockCustomTypeList, unexpectmcts MockCustomTypeList) { + assertSnapshotmcts := func(expectmcts github_com_solo_io_solo_kit_test_mocks_api_v1_customtype.MockCustomTypeList, unexpectmcts github_com_solo_io_solo_kit_test_mocks_api_v1_customtype.MockCustomTypeList) { drain: for { select { @@ -723,32 +724,32 @@ var _ = Describe("GroupEmitter", func() { } } } - mockCustomType1a, err := mockCustomTypeClient.Write(NewMockCustomType(namespace1, name1), clients.WriteOpts{Ctx: ctx}) + mockCustomType1a, err := mockCustomTypeClient.Write(github_com_solo_io_solo_kit_test_mocks_api_v1_customtype.NewMockCustomType(namespace1, name1), clients.WriteOpts{Ctx: ctx}) Expect(err).NotTo(HaveOccurred()) - mockCustomType1b, err := mockCustomTypeClient.Write(NewMockCustomType(namespace2, name1), clients.WriteOpts{Ctx: ctx}) + mockCustomType1b, err := mockCustomTypeClient.Write(github_com_solo_io_solo_kit_test_mocks_api_v1_customtype.NewMockCustomType(namespace2, name1), clients.WriteOpts{Ctx: ctx}) Expect(err).NotTo(HaveOccurred()) - assertSnapshotmcts(MockCustomTypeList{mockCustomType1a, mockCustomType1b}, nil) - mockCustomType2a, err := mockCustomTypeClient.Write(NewMockCustomType(namespace1, name2), clients.WriteOpts{Ctx: ctx}) + 
assertSnapshotmcts(github_com_solo_io_solo_kit_test_mocks_api_v1_customtype.MockCustomTypeList{mockCustomType1a, mockCustomType1b}, nil) + mockCustomType2a, err := mockCustomTypeClient.Write(github_com_solo_io_solo_kit_test_mocks_api_v1_customtype.NewMockCustomType(namespace1, name2), clients.WriteOpts{Ctx: ctx}) Expect(err).NotTo(HaveOccurred()) - mockCustomType2b, err := mockCustomTypeClient.Write(NewMockCustomType(namespace2, name2), clients.WriteOpts{Ctx: ctx}) + mockCustomType2b, err := mockCustomTypeClient.Write(github_com_solo_io_solo_kit_test_mocks_api_v1_customtype.NewMockCustomType(namespace2, name2), clients.WriteOpts{Ctx: ctx}) Expect(err).NotTo(HaveOccurred()) - assertSnapshotmcts(MockCustomTypeList{mockCustomType1a, mockCustomType1b, mockCustomType2a, mockCustomType2b}, nil) + assertSnapshotmcts(github_com_solo_io_solo_kit_test_mocks_api_v1_customtype.MockCustomTypeList{mockCustomType1a, mockCustomType1b, mockCustomType2a, mockCustomType2b}, nil) err = mockCustomTypeClient.Delete(mockCustomType2a.GetMetadata().Namespace, mockCustomType2a.GetMetadata().Name, clients.DeleteOpts{Ctx: ctx}) Expect(err).NotTo(HaveOccurred()) err = mockCustomTypeClient.Delete(mockCustomType2b.GetMetadata().Namespace, mockCustomType2b.GetMetadata().Name, clients.DeleteOpts{Ctx: ctx}) Expect(err).NotTo(HaveOccurred()) - assertSnapshotmcts(MockCustomTypeList{mockCustomType1a, mockCustomType1b}, MockCustomTypeList{mockCustomType2a, mockCustomType2b}) + assertSnapshotmcts(github_com_solo_io_solo_kit_test_mocks_api_v1_customtype.MockCustomTypeList{mockCustomType1a, mockCustomType1b}, github_com_solo_io_solo_kit_test_mocks_api_v1_customtype.MockCustomTypeList{mockCustomType2a, mockCustomType2b}) err = mockCustomTypeClient.Delete(mockCustomType1a.GetMetadata().Namespace, mockCustomType1a.GetMetadata().Name, clients.DeleteOpts{Ctx: ctx}) Expect(err).NotTo(HaveOccurred()) err = mockCustomTypeClient.Delete(mockCustomType1b.GetMetadata().Namespace, mockCustomType1b.GetMetadata().Name, 
clients.DeleteOpts{Ctx: ctx}) Expect(err).NotTo(HaveOccurred()) - assertSnapshotmcts(nil, MockCustomTypeList{mockCustomType1a, mockCustomType1b, mockCustomType2a, mockCustomType2b}) + assertSnapshotmcts(nil, github_com_solo_io_solo_kit_test_mocks_api_v1_customtype.MockCustomTypeList{mockCustomType1a, mockCustomType1b, mockCustomType2a, mockCustomType2b}) /* Pod diff --git a/test/mockstwo/group/testing_snapshot_simple_emitter.sk.go b/test/mockstwo/group/testing_snapshot_simple_emitter.sk.go index c4d0dddb5..8d2c992cc 100644 --- a/test/mockstwo/group/testing_snapshot_simple_emitter.sk.go +++ b/test/mockstwo/group/testing_snapshot_simple_emitter.sk.go @@ -8,7 +8,8 @@ import ( "time" github_com_solo_io_solo_kit_pkg_api_v1_resources_common_kubernetes "github.com/solo-io/solo-kit/pkg/api/v1/resources/common/kubernetes" - testing_solo_io "github.com/solo-io/solo-kit/test/mocks/v1" + github_com_solo_io_solo_kit_test_mocks_api_v1_customtype "github.com/solo-io/solo-kit/test/mocks/api/v1/customtype" + testing_solo_io_kubernetes "github.com/solo-io/solo-kit/test/mocks/v1" "go.opencensus.io/stats" @@ -84,15 +85,15 @@ func (c *testingSimpleEmitter) Snapshots(ctx context.Context) (<-chan *TestingSn currentSnapshot = TestingSnapshot{} for _, res := range untypedList { switch typed := res.(type) { - case *testing_solo_io.MockResource: + case *testing_solo_io_kubernetes.MockResource: currentSnapshot.Mocks = append(currentSnapshot.Mocks, typed) - case *testing_solo_io.FakeResource: + case *testing_solo_io_kubernetes.FakeResource: currentSnapshot.Fakes = append(currentSnapshot.Fakes, typed) - case *testing_solo_io.AnotherMockResource: + case *testing_solo_io_kubernetes.AnotherMockResource: currentSnapshot.Anothermockresources = append(currentSnapshot.Anothermockresources, typed) - case *testing_solo_io.ClusterResource: + case *testing_solo_io_kubernetes.ClusterResource: currentSnapshot.Clusterresources = append(currentSnapshot.Clusterresources, typed) - case *MockCustomType: + case 
*github_com_solo_io_solo_kit_test_mocks_api_v1_customtype.MockCustomType: currentSnapshot.Mcts = append(currentSnapshot.Mcts, typed) case *github_com_solo_io_solo_kit_pkg_api_v1_resources_common_kubernetes.Pod: currentSnapshot.Pods = append(currentSnapshot.Pods, typed) From fbce861c321f6e675abdc9bb930d04e8f6c9d85b Mon Sep 17 00:00:00 2001 From: Joe Kelley Date: Wed, 7 Aug 2019 17:03:29 -0400 Subject: [PATCH 14/17] mockstwo --- .../templates/simple_test_suite_template.go | 2 +- pkg/multicluster/group/group_suite_test.go | 2 +- .../mocks/conversion/conversion_suite_test.go | 2 +- test/mocks/group/group_suite_test.go | 2 +- test/mockstwo/api/solo-kit.json | 5 ++- test/mockstwo/group/group_suite_test.go | 2 +- .../mockstwo/group/testing_event_loop_test.go | 6 +-- test/mockstwo/group/testing_snapshot.sk.go | 4 +- .../group/testing_snapshot_emitter.sk.go | 20 ++++----- .../group/testing_snapshot_emitter_test.go | 42 +++++++++---------- .../testing_snapshot_simple_emitter.sk.go | 4 +- 11 files changed, 46 insertions(+), 45 deletions(-) diff --git a/pkg/code-generator/codegen/templates/simple_test_suite_template.go b/pkg/code-generator/codegen/templates/simple_test_suite_template.go index 31504f7a1..172782012 100644 --- a/pkg/code-generator/codegen/templates/simple_test_suite_template.go +++ b/pkg/code-generator/codegen/templates/simple_test_suite_template.go @@ -4,7 +4,7 @@ import ( "text/template" ) -var SimpleTestSuiteTemplate = template.Must(template.New("project_template").Funcs(Funcs).Parse(`package {{ .PackageName }}_test +var SimpleTestSuiteTemplate = template.Must(template.New("project_template").Funcs(Funcs).Parse(`package {{ .PackageName }} import ( "testing" diff --git a/pkg/multicluster/group/group_suite_test.go b/pkg/multicluster/group/group_suite_test.go index b3611efdb..8d2ddf537 100644 --- a/pkg/multicluster/group/group_suite_test.go +++ b/pkg/multicluster/group/group_suite_test.go @@ -1,6 +1,6 @@ // Code generated by solo-kit. DO NOT EDIT. 
-package group_test +package group import ( "testing" diff --git a/test/mocks/conversion/conversion_suite_test.go b/test/mocks/conversion/conversion_suite_test.go index 6dafe4cbe..ea9e7b2f1 100644 --- a/test/mocks/conversion/conversion_suite_test.go +++ b/test/mocks/conversion/conversion_suite_test.go @@ -1,6 +1,6 @@ // Code generated by solo-kit. DO NOT EDIT. -package conversion_test +package conversion import ( "testing" diff --git a/test/mocks/group/group_suite_test.go b/test/mocks/group/group_suite_test.go index b3611efdb..8d2ddf537 100644 --- a/test/mocks/group/group_suite_test.go +++ b/test/mocks/group/group_suite_test.go @@ -1,6 +1,6 @@ // Code generated by solo-kit. DO NOT EDIT. -package group_test +package group import ( "testing" diff --git a/test/mockstwo/api/solo-kit.json b/test/mockstwo/api/solo-kit.json index b65bd1275..0565e791c 100644 --- a/test/mockstwo/api/solo-kit.json +++ b/test/mockstwo/api/solo-kit.json @@ -6,7 +6,8 @@ "docs_dir": "test/mockstwo/docs", "resource_group_go_package": "github.com/solo-io/solo-kit/test/mockstwo/group", "imports": [ - "github.com/solo-io/solo-kit/api/external/kubernetes" + "github.com/solo-io/solo-kit/api/external/kubernetes", + "github.com/solo-io/solo-kit/test/mocks/api" ], "resource_groups": { "testing.solo.io": [ @@ -32,7 +33,7 @@ }, { "name": "MockCustomType", - "package": "github.com/solo-io/solo-kit/test/mocks/api/v1/customtype" + "package": "github.com/solo-io/solo-kit/test/mocks/v1" }, { "name": "Pod", diff --git a/test/mockstwo/group/group_suite_test.go b/test/mockstwo/group/group_suite_test.go index b3611efdb..8d2ddf537 100644 --- a/test/mockstwo/group/group_suite_test.go +++ b/test/mockstwo/group/group_suite_test.go @@ -1,6 +1,6 @@ // Code generated by solo-kit. DO NOT EDIT. 
-package group_test +package group import ( "testing" diff --git a/test/mockstwo/group/testing_event_loop_test.go b/test/mockstwo/group/testing_event_loop_test.go index 356e0e781..a6e98ce3c 100644 --- a/test/mockstwo/group/testing_event_loop_test.go +++ b/test/mockstwo/group/testing_event_loop_test.go @@ -10,7 +10,7 @@ import ( "time" github_com_solo_io_solo_kit_pkg_api_v1_resources_common_kubernetes "github.com/solo-io/solo-kit/pkg/api/v1/resources/common/kubernetes" - github_com_solo_io_solo_kit_test_mocks_api_v1_customtype "github.com/solo-io/solo-kit/test/mocks/api/v1/customtype" + github_com_solo_io_solo_kit_test_mocks_v1 "github.com/solo-io/solo-kit/test/mocks/v1" testing_solo_io_kubernetes "github.com/solo-io/solo-kit/test/mocks/v1" . "github.com/onsi/ginkgo" @@ -56,7 +56,7 @@ var _ = Describe("TestingEventLoop", func() { mockCustomTypeClientFactory := &factory.MemoryResourceClientFactory{ Cache: memory.NewInMemoryResourceCache(), } - mockCustomTypeClient, err := github_com_solo_io_solo_kit_test_mocks_api_v1_customtype.NewMockCustomTypeClient(mockCustomTypeClientFactory) + mockCustomTypeClient, err := github_com_solo_io_solo_kit_test_mocks_v1.NewMockCustomTypeClient(mockCustomTypeClientFactory) Expect(err).NotTo(HaveOccurred()) podClientFactory := &factory.MemoryResourceClientFactory{ @@ -76,7 +76,7 @@ var _ = Describe("TestingEventLoop", func() { Expect(err).NotTo(HaveOccurred()) _, err = emitter.ClusterResource().Write(testing_solo_io_kubernetes.NewClusterResource(namespace, "jerry"), clients.WriteOpts{}) Expect(err).NotTo(HaveOccurred()) - _, err = emitter.MockCustomType().Write(github_com_solo_io_solo_kit_test_mocks_api_v1_customtype.NewMockCustomType(namespace, "jerry"), clients.WriteOpts{}) + _, err = emitter.MockCustomType().Write(github_com_solo_io_solo_kit_test_mocks_v1.NewMockCustomType(namespace, "jerry"), clients.WriteOpts{}) Expect(err).NotTo(HaveOccurred()) _, err = 
emitter.Pod().Write(github_com_solo_io_solo_kit_pkg_api_v1_resources_common_kubernetes.NewPod(namespace, "jerry"), clients.WriteOpts{}) Expect(err).NotTo(HaveOccurred()) diff --git a/test/mockstwo/group/testing_snapshot.sk.go b/test/mockstwo/group/testing_snapshot.sk.go index 702ba0f2e..cb18c62ae 100644 --- a/test/mockstwo/group/testing_snapshot.sk.go +++ b/test/mockstwo/group/testing_snapshot.sk.go @@ -6,7 +6,7 @@ import ( "fmt" github_com_solo_io_solo_kit_pkg_api_v1_resources_common_kubernetes "github.com/solo-io/solo-kit/pkg/api/v1/resources/common/kubernetes" - github_com_solo_io_solo_kit_test_mocks_api_v1_customtype "github.com/solo-io/solo-kit/test/mocks/api/v1/customtype" + github_com_solo_io_solo_kit_test_mocks_v1 "github.com/solo-io/solo-kit/test/mocks/v1" testing_solo_io_kubernetes "github.com/solo-io/solo-kit/test/mocks/v1" "github.com/solo-io/go-utils/hashutils" @@ -18,7 +18,7 @@ type TestingSnapshot struct { Fakes testing_solo_io_kubernetes.FakeResourceList Anothermockresources testing_solo_io_kubernetes.AnotherMockResourceList Clusterresources testing_solo_io_kubernetes.ClusterResourceList - Mcts github_com_solo_io_solo_kit_test_mocks_api_v1_customtype.MockCustomTypeList + Mcts github_com_solo_io_solo_kit_test_mocks_v1.MockCustomTypeList Pods github_com_solo_io_solo_kit_pkg_api_v1_resources_common_kubernetes.PodList } diff --git a/test/mockstwo/group/testing_snapshot_emitter.sk.go b/test/mockstwo/group/testing_snapshot_emitter.sk.go index 14e5789a1..8279593d9 100644 --- a/test/mockstwo/group/testing_snapshot_emitter.sk.go +++ b/test/mockstwo/group/testing_snapshot_emitter.sk.go @@ -7,7 +7,7 @@ import ( "time" github_com_solo_io_solo_kit_pkg_api_v1_resources_common_kubernetes "github.com/solo-io/solo-kit/pkg/api/v1/resources/common/kubernetes" - github_com_solo_io_solo_kit_test_mocks_api_v1_customtype "github.com/solo-io/solo-kit/test/mocks/api/v1/customtype" + github_com_solo_io_solo_kit_test_mocks_v1 "github.com/solo-io/solo-kit/test/mocks/v1" 
testing_solo_io_kubernetes "github.com/solo-io/solo-kit/test/mocks/v1" "go.opencensus.io/stats" @@ -57,16 +57,16 @@ type TestingEmitter interface { FakeResource() testing_solo_io_kubernetes.FakeResourceClient AnotherMockResource() testing_solo_io_kubernetes.AnotherMockResourceClient ClusterResource() testing_solo_io_kubernetes.ClusterResourceClient - MockCustomType() github_com_solo_io_solo_kit_test_mocks_api_v1_customtype.MockCustomTypeClient + MockCustomType() github_com_solo_io_solo_kit_test_mocks_v1.MockCustomTypeClient Pod() github_com_solo_io_solo_kit_pkg_api_v1_resources_common_kubernetes.PodClient Snapshots(watchNamespaces []string, opts clients.WatchOpts) (<-chan *TestingSnapshot, <-chan error, error) } -func NewTestingEmitter(mockResourceClient testing_solo_io_kubernetes.MockResourceClient, fakeResourceClient testing_solo_io_kubernetes.FakeResourceClient, anotherMockResourceClient testing_solo_io_kubernetes.AnotherMockResourceClient, clusterResourceClient testing_solo_io_kubernetes.ClusterResourceClient, mockCustomTypeClient github_com_solo_io_solo_kit_test_mocks_api_v1_customtype.MockCustomTypeClient, podClient github_com_solo_io_solo_kit_pkg_api_v1_resources_common_kubernetes.PodClient) TestingEmitter { +func NewTestingEmitter(mockResourceClient testing_solo_io_kubernetes.MockResourceClient, fakeResourceClient testing_solo_io_kubernetes.FakeResourceClient, anotherMockResourceClient testing_solo_io_kubernetes.AnotherMockResourceClient, clusterResourceClient testing_solo_io_kubernetes.ClusterResourceClient, mockCustomTypeClient github_com_solo_io_solo_kit_test_mocks_v1.MockCustomTypeClient, podClient github_com_solo_io_solo_kit_pkg_api_v1_resources_common_kubernetes.PodClient) TestingEmitter { return NewTestingEmitterWithEmit(mockResourceClient, fakeResourceClient, anotherMockResourceClient, clusterResourceClient, mockCustomTypeClient, podClient, make(chan struct{})) } -func NewTestingEmitterWithEmit(mockResourceClient 
testing_solo_io_kubernetes.MockResourceClient, fakeResourceClient testing_solo_io_kubernetes.FakeResourceClient, anotherMockResourceClient testing_solo_io_kubernetes.AnotherMockResourceClient, clusterResourceClient testing_solo_io_kubernetes.ClusterResourceClient, mockCustomTypeClient github_com_solo_io_solo_kit_test_mocks_api_v1_customtype.MockCustomTypeClient, podClient github_com_solo_io_solo_kit_pkg_api_v1_resources_common_kubernetes.PodClient, emit <-chan struct{}) TestingEmitter { +func NewTestingEmitterWithEmit(mockResourceClient testing_solo_io_kubernetes.MockResourceClient, fakeResourceClient testing_solo_io_kubernetes.FakeResourceClient, anotherMockResourceClient testing_solo_io_kubernetes.AnotherMockResourceClient, clusterResourceClient testing_solo_io_kubernetes.ClusterResourceClient, mockCustomTypeClient github_com_solo_io_solo_kit_test_mocks_v1.MockCustomTypeClient, podClient github_com_solo_io_solo_kit_pkg_api_v1_resources_common_kubernetes.PodClient, emit <-chan struct{}) TestingEmitter { return &testingEmitter{ mockResource: mockResourceClient, fakeResource: fakeResourceClient, @@ -84,7 +84,7 @@ type testingEmitter struct { fakeResource testing_solo_io_kubernetes.FakeResourceClient anotherMockResource testing_solo_io_kubernetes.AnotherMockResourceClient clusterResource testing_solo_io_kubernetes.ClusterResourceClient - mockCustomType github_com_solo_io_solo_kit_test_mocks_api_v1_customtype.MockCustomTypeClient + mockCustomType github_com_solo_io_solo_kit_test_mocks_v1.MockCustomTypeClient pod github_com_solo_io_solo_kit_pkg_api_v1_resources_common_kubernetes.PodClient } @@ -126,7 +126,7 @@ func (c *testingEmitter) ClusterResource() testing_solo_io_kubernetes.ClusterRes return c.clusterResource } -func (c *testingEmitter) MockCustomType() github_com_solo_io_solo_kit_test_mocks_api_v1_customtype.MockCustomTypeClient { +func (c *testingEmitter) MockCustomType() github_com_solo_io_solo_kit_test_mocks_v1.MockCustomTypeClient { return c.mockCustomType } 
@@ -177,12 +177,12 @@ func (c *testingEmitter) Snapshots(watchNamespaces []string, opts clients.WatchO /* Create channel for ClusterResource */ /* Create channel for MockCustomType */ type mockCustomTypeListWithNamespace struct { - list github_com_solo_io_solo_kit_test_mocks_api_v1_customtype.MockCustomTypeList + list github_com_solo_io_solo_kit_test_mocks_v1.MockCustomTypeList namespace string } mockCustomTypeChan := make(chan mockCustomTypeListWithNamespace) - var initialMockCustomTypeList github_com_solo_io_solo_kit_test_mocks_api_v1_customtype.MockCustomTypeList + var initialMockCustomTypeList github_com_solo_io_solo_kit_test_mocks_v1.MockCustomTypeList /* Create channel for Pod */ type podListWithNamespace struct { list github_com_solo_io_solo_kit_pkg_api_v1_resources_common_kubernetes.PodList @@ -378,7 +378,7 @@ func (c *testingEmitter) Snapshots(watchNamespaces []string, opts clients.WatchO mocksByNamespace := make(map[string]testing_solo_io_kubernetes.MockResourceList) fakesByNamespace := make(map[string]testing_solo_io_kubernetes.FakeResourceList) anothermockresourcesByNamespace := make(map[string]testing_solo_io_kubernetes.AnotherMockResourceList) - mctsByNamespace := make(map[string]github_com_solo_io_solo_kit_test_mocks_api_v1_customtype.MockCustomTypeList) + mctsByNamespace := make(map[string]github_com_solo_io_solo_kit_test_mocks_v1.MockCustomTypeList) podsByNamespace := make(map[string]github_com_solo_io_solo_kit_pkg_api_v1_resources_common_kubernetes.PodList) for { @@ -441,7 +441,7 @@ func (c *testingEmitter) Snapshots(watchNamespaces []string, opts clients.WatchO // merge lists by namespace mctsByNamespace[namespace] = mockCustomTypeNamespacedList.list - var mockCustomTypeList github_com_solo_io_solo_kit_test_mocks_api_v1_customtype.MockCustomTypeList + var mockCustomTypeList github_com_solo_io_solo_kit_test_mocks_v1.MockCustomTypeList for _, mcts := range mctsByNamespace { mockCustomTypeList = append(mockCustomTypeList, mcts...) 
} diff --git a/test/mockstwo/group/testing_snapshot_emitter_test.go b/test/mockstwo/group/testing_snapshot_emitter_test.go index 4a3c76255..671be4850 100644 --- a/test/mockstwo/group/testing_snapshot_emitter_test.go +++ b/test/mockstwo/group/testing_snapshot_emitter_test.go @@ -10,7 +10,7 @@ import ( "time" github_com_solo_io_solo_kit_pkg_api_v1_resources_common_kubernetes "github.com/solo-io/solo-kit/pkg/api/v1/resources/common/kubernetes" - github_com_solo_io_solo_kit_test_mocks_api_v1_customtype "github.com/solo-io/solo-kit/test/mocks/api/v1/customtype" + github_com_solo_io_solo_kit_test_mocks_v1 "github.com/solo-io/solo-kit/test/mocks/v1" testing_solo_io_kubernetes "github.com/solo-io/solo-kit/test/mocks/v1" . "github.com/onsi/ginkgo" @@ -48,7 +48,7 @@ var _ = Describe("GroupEmitter", func() { fakeResourceClient testing_solo_io_kubernetes.FakeResourceClient anotherMockResourceClient testing_solo_io_kubernetes.AnotherMockResourceClient clusterResourceClient testing_solo_io_kubernetes.ClusterResourceClient - mockCustomTypeClient github_com_solo_io_solo_kit_test_mocks_api_v1_customtype.MockCustomTypeClient + mockCustomTypeClient github_com_solo_io_solo_kit_test_mocks_v1.MockCustomTypeClient podClient github_com_solo_io_solo_kit_pkg_api_v1_resources_common_kubernetes.PodClient ) @@ -101,7 +101,7 @@ var _ = Describe("GroupEmitter", func() { Cache: memory.NewInMemoryResourceCache(), } - mockCustomTypeClient, err = github_com_solo_io_solo_kit_test_mocks_api_v1_customtype.NewMockCustomTypeClient(mockCustomTypeClientFactory) + mockCustomTypeClient, err = github_com_solo_io_solo_kit_test_mocks_v1.NewMockCustomTypeClient(mockCustomTypeClientFactory) Expect(err).NotTo(HaveOccurred()) // Pod Constructor podClientFactory := &factory.MemoryResourceClientFactory{ @@ -353,7 +353,7 @@ var _ = Describe("GroupEmitter", func() { MockCustomType */ - assertSnapshotmcts := func(expectmcts github_com_solo_io_solo_kit_test_mocks_api_v1_customtype.MockCustomTypeList, unexpectmcts 
github_com_solo_io_solo_kit_test_mocks_api_v1_customtype.MockCustomTypeList) { + assertSnapshotmcts := func(expectmcts github_com_solo_io_solo_kit_test_mocks_v1.MockCustomTypeList, unexpectmcts github_com_solo_io_solo_kit_test_mocks_v1.MockCustomTypeList) { drain: for { select { @@ -379,32 +379,32 @@ var _ = Describe("GroupEmitter", func() { } } } - mockCustomType1a, err := mockCustomTypeClient.Write(github_com_solo_io_solo_kit_test_mocks_api_v1_customtype.NewMockCustomType(namespace1, name1), clients.WriteOpts{Ctx: ctx}) + mockCustomType1a, err := mockCustomTypeClient.Write(github_com_solo_io_solo_kit_test_mocks_v1.NewMockCustomType(namespace1, name1), clients.WriteOpts{Ctx: ctx}) Expect(err).NotTo(HaveOccurred()) - mockCustomType1b, err := mockCustomTypeClient.Write(github_com_solo_io_solo_kit_test_mocks_api_v1_customtype.NewMockCustomType(namespace2, name1), clients.WriteOpts{Ctx: ctx}) + mockCustomType1b, err := mockCustomTypeClient.Write(github_com_solo_io_solo_kit_test_mocks_v1.NewMockCustomType(namespace2, name1), clients.WriteOpts{Ctx: ctx}) Expect(err).NotTo(HaveOccurred()) - assertSnapshotmcts(github_com_solo_io_solo_kit_test_mocks_api_v1_customtype.MockCustomTypeList{mockCustomType1a, mockCustomType1b}, nil) - mockCustomType2a, err := mockCustomTypeClient.Write(github_com_solo_io_solo_kit_test_mocks_api_v1_customtype.NewMockCustomType(namespace1, name2), clients.WriteOpts{Ctx: ctx}) + assertSnapshotmcts(github_com_solo_io_solo_kit_test_mocks_v1.MockCustomTypeList{mockCustomType1a, mockCustomType1b}, nil) + mockCustomType2a, err := mockCustomTypeClient.Write(github_com_solo_io_solo_kit_test_mocks_v1.NewMockCustomType(namespace1, name2), clients.WriteOpts{Ctx: ctx}) Expect(err).NotTo(HaveOccurred()) - mockCustomType2b, err := mockCustomTypeClient.Write(github_com_solo_io_solo_kit_test_mocks_api_v1_customtype.NewMockCustomType(namespace2, name2), clients.WriteOpts{Ctx: ctx}) + mockCustomType2b, err := 
mockCustomTypeClient.Write(github_com_solo_io_solo_kit_test_mocks_v1.NewMockCustomType(namespace2, name2), clients.WriteOpts{Ctx: ctx}) Expect(err).NotTo(HaveOccurred()) - assertSnapshotmcts(github_com_solo_io_solo_kit_test_mocks_api_v1_customtype.MockCustomTypeList{mockCustomType1a, mockCustomType1b, mockCustomType2a, mockCustomType2b}, nil) + assertSnapshotmcts(github_com_solo_io_solo_kit_test_mocks_v1.MockCustomTypeList{mockCustomType1a, mockCustomType1b, mockCustomType2a, mockCustomType2b}, nil) err = mockCustomTypeClient.Delete(mockCustomType2a.GetMetadata().Namespace, mockCustomType2a.GetMetadata().Name, clients.DeleteOpts{Ctx: ctx}) Expect(err).NotTo(HaveOccurred()) err = mockCustomTypeClient.Delete(mockCustomType2b.GetMetadata().Namespace, mockCustomType2b.GetMetadata().Name, clients.DeleteOpts{Ctx: ctx}) Expect(err).NotTo(HaveOccurred()) - assertSnapshotmcts(github_com_solo_io_solo_kit_test_mocks_api_v1_customtype.MockCustomTypeList{mockCustomType1a, mockCustomType1b}, github_com_solo_io_solo_kit_test_mocks_api_v1_customtype.MockCustomTypeList{mockCustomType2a, mockCustomType2b}) + assertSnapshotmcts(github_com_solo_io_solo_kit_test_mocks_v1.MockCustomTypeList{mockCustomType1a, mockCustomType1b}, github_com_solo_io_solo_kit_test_mocks_v1.MockCustomTypeList{mockCustomType2a, mockCustomType2b}) err = mockCustomTypeClient.Delete(mockCustomType1a.GetMetadata().Namespace, mockCustomType1a.GetMetadata().Name, clients.DeleteOpts{Ctx: ctx}) Expect(err).NotTo(HaveOccurred()) err = mockCustomTypeClient.Delete(mockCustomType1b.GetMetadata().Namespace, mockCustomType1b.GetMetadata().Name, clients.DeleteOpts{Ctx: ctx}) Expect(err).NotTo(HaveOccurred()) - assertSnapshotmcts(nil, github_com_solo_io_solo_kit_test_mocks_api_v1_customtype.MockCustomTypeList{mockCustomType1a, mockCustomType1b, mockCustomType2a, mockCustomType2b}) + assertSnapshotmcts(nil, github_com_solo_io_solo_kit_test_mocks_v1.MockCustomTypeList{mockCustomType1a, mockCustomType1b, mockCustomType2a, 
mockCustomType2b}) /* Pod @@ -698,7 +698,7 @@ var _ = Describe("GroupEmitter", func() { MockCustomType */ - assertSnapshotmcts := func(expectmcts github_com_solo_io_solo_kit_test_mocks_api_v1_customtype.MockCustomTypeList, unexpectmcts github_com_solo_io_solo_kit_test_mocks_api_v1_customtype.MockCustomTypeList) { + assertSnapshotmcts := func(expectmcts github_com_solo_io_solo_kit_test_mocks_v1.MockCustomTypeList, unexpectmcts github_com_solo_io_solo_kit_test_mocks_v1.MockCustomTypeList) { drain: for { select { @@ -724,32 +724,32 @@ var _ = Describe("GroupEmitter", func() { } } } - mockCustomType1a, err := mockCustomTypeClient.Write(github_com_solo_io_solo_kit_test_mocks_api_v1_customtype.NewMockCustomType(namespace1, name1), clients.WriteOpts{Ctx: ctx}) + mockCustomType1a, err := mockCustomTypeClient.Write(github_com_solo_io_solo_kit_test_mocks_v1.NewMockCustomType(namespace1, name1), clients.WriteOpts{Ctx: ctx}) Expect(err).NotTo(HaveOccurred()) - mockCustomType1b, err := mockCustomTypeClient.Write(github_com_solo_io_solo_kit_test_mocks_api_v1_customtype.NewMockCustomType(namespace2, name1), clients.WriteOpts{Ctx: ctx}) + mockCustomType1b, err := mockCustomTypeClient.Write(github_com_solo_io_solo_kit_test_mocks_v1.NewMockCustomType(namespace2, name1), clients.WriteOpts{Ctx: ctx}) Expect(err).NotTo(HaveOccurred()) - assertSnapshotmcts(github_com_solo_io_solo_kit_test_mocks_api_v1_customtype.MockCustomTypeList{mockCustomType1a, mockCustomType1b}, nil) - mockCustomType2a, err := mockCustomTypeClient.Write(github_com_solo_io_solo_kit_test_mocks_api_v1_customtype.NewMockCustomType(namespace1, name2), clients.WriteOpts{Ctx: ctx}) + assertSnapshotmcts(github_com_solo_io_solo_kit_test_mocks_v1.MockCustomTypeList{mockCustomType1a, mockCustomType1b}, nil) + mockCustomType2a, err := mockCustomTypeClient.Write(github_com_solo_io_solo_kit_test_mocks_v1.NewMockCustomType(namespace1, name2), clients.WriteOpts{Ctx: ctx}) Expect(err).NotTo(HaveOccurred()) - mockCustomType2b, err 
:= mockCustomTypeClient.Write(github_com_solo_io_solo_kit_test_mocks_api_v1_customtype.NewMockCustomType(namespace2, name2), clients.WriteOpts{Ctx: ctx}) + mockCustomType2b, err := mockCustomTypeClient.Write(github_com_solo_io_solo_kit_test_mocks_v1.NewMockCustomType(namespace2, name2), clients.WriteOpts{Ctx: ctx}) Expect(err).NotTo(HaveOccurred()) - assertSnapshotmcts(github_com_solo_io_solo_kit_test_mocks_api_v1_customtype.MockCustomTypeList{mockCustomType1a, mockCustomType1b, mockCustomType2a, mockCustomType2b}, nil) + assertSnapshotmcts(github_com_solo_io_solo_kit_test_mocks_v1.MockCustomTypeList{mockCustomType1a, mockCustomType1b, mockCustomType2a, mockCustomType2b}, nil) err = mockCustomTypeClient.Delete(mockCustomType2a.GetMetadata().Namespace, mockCustomType2a.GetMetadata().Name, clients.DeleteOpts{Ctx: ctx}) Expect(err).NotTo(HaveOccurred()) err = mockCustomTypeClient.Delete(mockCustomType2b.GetMetadata().Namespace, mockCustomType2b.GetMetadata().Name, clients.DeleteOpts{Ctx: ctx}) Expect(err).NotTo(HaveOccurred()) - assertSnapshotmcts(github_com_solo_io_solo_kit_test_mocks_api_v1_customtype.MockCustomTypeList{mockCustomType1a, mockCustomType1b}, github_com_solo_io_solo_kit_test_mocks_api_v1_customtype.MockCustomTypeList{mockCustomType2a, mockCustomType2b}) + assertSnapshotmcts(github_com_solo_io_solo_kit_test_mocks_v1.MockCustomTypeList{mockCustomType1a, mockCustomType1b}, github_com_solo_io_solo_kit_test_mocks_v1.MockCustomTypeList{mockCustomType2a, mockCustomType2b}) err = mockCustomTypeClient.Delete(mockCustomType1a.GetMetadata().Namespace, mockCustomType1a.GetMetadata().Name, clients.DeleteOpts{Ctx: ctx}) Expect(err).NotTo(HaveOccurred()) err = mockCustomTypeClient.Delete(mockCustomType1b.GetMetadata().Namespace, mockCustomType1b.GetMetadata().Name, clients.DeleteOpts{Ctx: ctx}) Expect(err).NotTo(HaveOccurred()) - assertSnapshotmcts(nil, github_com_solo_io_solo_kit_test_mocks_api_v1_customtype.MockCustomTypeList{mockCustomType1a, mockCustomType1b, 
mockCustomType2a, mockCustomType2b}) + assertSnapshotmcts(nil, github_com_solo_io_solo_kit_test_mocks_v1.MockCustomTypeList{mockCustomType1a, mockCustomType1b, mockCustomType2a, mockCustomType2b}) /* Pod diff --git a/test/mockstwo/group/testing_snapshot_simple_emitter.sk.go b/test/mockstwo/group/testing_snapshot_simple_emitter.sk.go index 8d2c992cc..802d27bc2 100644 --- a/test/mockstwo/group/testing_snapshot_simple_emitter.sk.go +++ b/test/mockstwo/group/testing_snapshot_simple_emitter.sk.go @@ -8,7 +8,7 @@ import ( "time" github_com_solo_io_solo_kit_pkg_api_v1_resources_common_kubernetes "github.com/solo-io/solo-kit/pkg/api/v1/resources/common/kubernetes" - github_com_solo_io_solo_kit_test_mocks_api_v1_customtype "github.com/solo-io/solo-kit/test/mocks/api/v1/customtype" + github_com_solo_io_solo_kit_test_mocks_v1 "github.com/solo-io/solo-kit/test/mocks/v1" testing_solo_io_kubernetes "github.com/solo-io/solo-kit/test/mocks/v1" "go.opencensus.io/stats" @@ -93,7 +93,7 @@ func (c *testingSimpleEmitter) Snapshots(ctx context.Context) (<-chan *TestingSn currentSnapshot.Anothermockresources = append(currentSnapshot.Anothermockresources, typed) case *testing_solo_io_kubernetes.ClusterResource: currentSnapshot.Clusterresources = append(currentSnapshot.Clusterresources, typed) - case *github_com_solo_io_solo_kit_test_mocks_api_v1_customtype.MockCustomType: + case *github_com_solo_io_solo_kit_test_mocks_v1.MockCustomType: currentSnapshot.Mcts = append(currentSnapshot.Mcts, typed) case *github_com_solo_io_solo_kit_pkg_api_v1_resources_common_kubernetes.Pod: currentSnapshot.Pods = append(currentSnapshot.Pods, typed) From 6e8b12ecf2bde69f433b274101fe04a61f938be8 Mon Sep 17 00:00:00 2001 From: Joe Kelley Date: Wed, 7 Aug 2019 17:20:56 -0400 Subject: [PATCH 15/17] regen, lost some docs --- .../doc/envoy/api/v2/discovery.proto.sk.md | 186 ---- .../api/doc/envoy/type/percent.proto.sk.md | 88 -- .../solo-kit/api/v1/metadata.proto.sk.md | 57 -- 
.../solo-kit/api/v1/solo-kit.proto.sk.md | 51 -- .../solo-kit/api/v1/status.proto.sk.md | 69 -- test/mocks/api/doc/gogoproto/gogo.proto.sk.md | 51 -- .../doc/google/api/annotations.proto.sk.md | 37 - .../mocks/api/doc/google/api/http.proto.sk.md | 338 ------- .../google/protobuf/descriptor.proto.sk.md | 832 ------------------ .../doc/google/protobuf/duration.proto.sk.md | 137 --- .../doc/google/protobuf/struct.proto.sk.md | 158 ---- .../doc/google/protobuf/timestamp.proto.sk.md | 155 ---- .../doc/google/protobuf/wrappers.proto.sk.md | 252 ------ .../api/doc/google/rpc/status.proto.sk.md | 117 --- .../api/doc/testing.solo.io.project.sk.md | 25 - .../envoy/api/v2/core/base.proto.sk.md | 77 +- .../envoy/api/v2/discovery.proto.sk.md | 2 +- .../external/google/protobuf/any.proto.sk.md | 141 --- .../google/protobuf/descriptor.proto.sk.md | 2 +- .../external/google/rpc/status.proto.sk.md | 2 +- .../api/v1/apiserver/api_server.proto.sk.md | 4 +- .../google/protobuf/any.proto.sk.md | 11 +- .../v1alpha1/testing_snapshot_emitter.sk.go | 204 ----- .../v2alpha1/testing_snapshot_emitter.sk.go | 263 ------ 24 files changed, 42 insertions(+), 3217 deletions(-) delete mode 100644 test/mocks/api/doc/envoy/api/v2/discovery.proto.sk.md delete mode 100644 test/mocks/api/doc/envoy/type/percent.proto.sk.md delete mode 100644 test/mocks/api/doc/github.com/solo-io/solo-kit/api/v1/metadata.proto.sk.md delete mode 100644 test/mocks/api/doc/github.com/solo-io/solo-kit/api/v1/solo-kit.proto.sk.md delete mode 100644 test/mocks/api/doc/github.com/solo-io/solo-kit/api/v1/status.proto.sk.md delete mode 100644 test/mocks/api/doc/gogoproto/gogo.proto.sk.md delete mode 100644 test/mocks/api/doc/google/api/annotations.proto.sk.md delete mode 100644 test/mocks/api/doc/google/api/http.proto.sk.md delete mode 100644 test/mocks/api/doc/google/protobuf/descriptor.proto.sk.md delete mode 100644 test/mocks/api/doc/google/protobuf/duration.proto.sk.md delete mode 100644 
test/mocks/api/doc/google/protobuf/struct.proto.sk.md delete mode 100644 test/mocks/api/doc/google/protobuf/timestamp.proto.sk.md delete mode 100644 test/mocks/api/doc/google/protobuf/wrappers.proto.sk.md delete mode 100644 test/mocks/api/doc/google/rpc/status.proto.sk.md delete mode 100644 test/mocks/api/doc/testing.solo.io.project.sk.md rename test/mocks/{api/doc => docs}/envoy/api/v2/core/base.proto.sk.md (85%) delete mode 100644 test/mocks/docs/github.com/solo-io/solo-kit/api/external/google/protobuf/any.proto.sk.md rename test/mocks/docs/{ => github.com/solo-io/solo-kit/api/external}/google/protobuf/descriptor.proto.sk.md (99%) rename test/mocks/{api/doc => docs}/google/protobuf/any.proto.sk.md (98%) delete mode 100644 test/mocks/v1alpha1/testing_snapshot_emitter.sk.go delete mode 100644 test/mocks/v2alpha1/testing_snapshot_emitter.sk.go diff --git a/test/mocks/api/doc/envoy/api/v2/discovery.proto.sk.md b/test/mocks/api/doc/envoy/api/v2/discovery.proto.sk.md deleted file mode 100644 index 8fdf09be3..000000000 --- a/test/mocks/api/doc/envoy/api/v2/discovery.proto.sk.md +++ /dev/null @@ -1,186 +0,0 @@ - ---- -title: "discovery.proto" -weight: 5 ---- - - - - -### Package: `envoy.api.v2` -#### Types: - - -- [DiscoveryRequest](#discoveryrequest) -- [DiscoveryResponse](#discoveryresponse) -- [DeltaDiscoveryRequest](#deltadiscoveryrequest) -- [DeltaDiscoveryResponse](#deltadiscoveryresponse) -- [Resource](#resource) - - - - -##### Source File: `envoy/api/v2/discovery.proto` - - - - - ---- -### DiscoveryRequest - - -A DiscoveryRequest requests a set of versioned resources of the same type for -a given Envoy node on some API. 
- -```yaml -"versionInfo": string -"node": .envoy.api.v2.core.Node -"resourceNames": []string -"typeUrl": string -"responseNonce": string -"errorDetail": .google.rpc.Status - -``` - -| Field | Type | Description | Default | -| ----- | ---- | ----------- |----------- | -| `versionInfo` | `string` | The version_info provided in the request messages will be the version_info received with the most recent successfully processed response or empty on the first request. It is expected that no new request is sent after a response is received until the Envoy instance is ready to ACK/NACK the new configuration. ACK/NACK takes place by returning the new API config version as applied or the previous API config version respectively. Each type_url (see below) has an independent version associated with it. | | -| `node` | [.envoy.api.v2.core.Node](../core/base.proto.sk#node) | The node making the request. | | -| `resourceNames` | `[]string` | List of resources to subscribe to, e.g. list of cluster names or a route configuration name. If this is empty, all resources for the API are returned. LDS/CDS expect empty resource_names, since this is global discovery for the Envoy instance. The LDS and CDS responses will then imply a number of resources that need to be fetched via EDS/RDS, which will be explicitly enumerated in resource_names. | | -| `typeUrl` | `string` | Type of the resource that is being requested, e.g. "type.googleapis.com/envoy.api.v2.ClusterLoadAssignment". This is implicit in requests made via singleton xDS APIs such as CDS, LDS, etc. but is required for ADS. | | -| `responseNonce` | `string` | nonce corresponding to DiscoveryResponse being ACK/NACKed. See above discussion on version_info and the DiscoveryResponse nonce comment. This may be empty if no nonce is available, e.g. at startup or for non-stream xDS implementations. 
| | -| `errorDetail` | [.google.rpc.Status](../../../../google/rpc/status.proto.sk#status) | This is populated when the previous :ref:`DiscoveryResponse ` failed to update configuration. The *message* field in *error_details* provides the Envoy internal exception related to the failure. It is only intended for consumption during manual debugging, the string provided is not guaranteed to be stable across Envoy versions. | | - - - - ---- -### DiscoveryResponse - - - -```yaml -"versionInfo": string -"resources": []google.protobuf.Any -"canary": bool -"typeUrl": string -"nonce": string -"controlPlane": .envoy.api.v2.core.ControlPlane - -``` - -| Field | Type | Description | Default | -| ----- | ---- | ----------- |----------- | -| `versionInfo` | `string` | The version of the response data. | | -| `resources` | [[]google.protobuf.Any](../../../../google/protobuf/any.proto.sk#any) | The response resources. These resources are typed and depend on the API being called. | | -| `canary` | `bool` | [#not-implemented-hide:] Canary is used to support two Envoy command line flags: * --terminate-on-canary-transition-failure. When set, Envoy is able to terminate if it detects that configuration is stuck at canary. Consider this example sequence of updates: - Management server applies a canary config successfully. - Management server rolls back to a production config. - Envoy rejects the new production config. Since there is no sensible way to continue receiving configuration updates, Envoy will then terminate and apply production config from a clean slate. * --dry-run-canary. When set, a canary response will never be applied, only validated via a dry run. | | -| `typeUrl` | `string` | Type URL for resources. This must be consistent with the type_url in the Any messages for resources if resources is non-empty. This effectively identifies the xDS API when muxing over ADS. 
| | -| `nonce` | `string` | For gRPC based subscriptions, the nonce provides a way to explicitly ack a specific DiscoveryResponse in a following DiscoveryRequest. Additional messages may have been sent by Envoy to the management server for the previous version on the stream prior to this DiscoveryResponse, that were unprocessed at response send time. The nonce allows the management server to ignore any further DiscoveryRequests for the previous version until a DiscoveryRequest bearing the nonce. The nonce is optional and is not required for non-stream based xDS implementations. | | -| `controlPlane` | [.envoy.api.v2.core.ControlPlane](../core/base.proto.sk#controlplane) | [#not-implemented-hide:] The control plane instance that sent the response. | | - - - - ---- -### DeltaDiscoveryRequest - - -DeltaDiscoveryRequest and DeltaDiscoveryResponse are used in a new gRPC -endpoint for Delta xDS. - -With Delta xDS, the DeltaDiscoveryResponses do not need to include a full -snapshot of the tracked resources. Instead, DeltaDiscoveryResponses are a -diff to the state of a xDS client. -In Delta XDS there are per resource versions, which allow tracking state at -the resource granularity. -An xDS Delta session is always in the context of a gRPC bidirectional -stream. This allows the xDS server to keep track of the state of xDS clients -connected to it. - -In Delta xDS the nonce field is required and used to pair -DeltaDiscoveryResponse to a DeltaDiscoveryRequest ACK or NACK. -Optionally, a response message level system_version_info is present for -debugging purposes only. - -DeltaDiscoveryRequest can be sent in 3 situations: - 1. Initial message in a xDS bidirectional gRPC stream. - 2. As a ACK or NACK response to a previous DeltaDiscoveryResponse. - In this case the response_nonce is set to the nonce value in the Response. - ACK or NACK is determined by the absence or presence of error_detail. - 3. Spontaneous DeltaDiscoveryRequest from the client. 
- This can be done to dynamically add or remove elements from the tracked - resource_names set. In this case response_nonce must be omitted. - -```yaml -"node": .envoy.api.v2.core.Node -"typeUrl": string -"resourceNamesSubscribe": []string -"resourceNamesUnsubscribe": []string -"initialResourceVersions": map -"responseNonce": string -"errorDetail": .google.rpc.Status - -``` - -| Field | Type | Description | Default | -| ----- | ---- | ----------- |----------- | -| `node` | [.envoy.api.v2.core.Node](../core/base.proto.sk#node) | The node making the request. | | -| `typeUrl` | `string` | Type of the resource that is being requested, e.g. "type.googleapis.com/envoy.api.v2.ClusterLoadAssignment". This is implicit in requests made via singleton xDS APIs such as CDS, LDS, etc. but is required for ADS. | | -| `resourceNamesSubscribe` | `[]string` | DeltaDiscoveryRequests allow the client to add or remove individual resources to the set of tracked resources in the context of a stream. All resource names in the resource_names_subscribe list are added to the set of tracked resources and all resource names in the resource_names_unsubscribe list are removed from the set of tracked resources. Unlike in state-of-the-world xDS, an empty resource_names_subscribe or resource_names_unsubscribe list simply means that no resources are to be added or removed to the resource list. The xDS server must send updates for all tracked resources but can also send updates for resources the client has not subscribed to. This behavior is similar to state-of-the-world xDS. These two fields can be set for all types of DeltaDiscoveryRequests (initial, ACK/NACK or spontaneous). A list of Resource names to add to the list of tracked resources. | | -| `resourceNamesUnsubscribe` | `[]string` | A list of Resource names to remove from the list of tracked resources. 
| | -| `initialResourceVersions` | `map` | This map must be populated when the DeltaDiscoveryRequest is the first in a stream (assuming there are any resources - this field's purpose is to enable a session to continue in a reconnected gRPC stream, and so will not be used in the very first stream of a session). The keys are the resources names of the xDS resources known to the xDS client. The values in the map are the associated resource level version info. | | -| `responseNonce` | `string` | When the DeltaDiscoveryRequest is a ACK or NACK message in response to a previous DeltaDiscoveryResponse, the response_nonce must be the nonce in the DeltaDiscoveryResponse. Otherwise response_nonce must be omitted. | | -| `errorDetail` | [.google.rpc.Status](../../../../google/rpc/status.proto.sk#status) | This is populated when the previous :ref:`DiscoveryResponse ` failed to update configuration. The *message* field in *error_details* provides the Envoy internal exception related to the failure. | | - - - - ---- -### DeltaDiscoveryResponse - - - -```yaml -"systemVersionInfo": string -"resources": []envoy.api.v2.Resource -"removedResources": []string -"nonce": string - -``` - -| Field | Type | Description | Default | -| ----- | ---- | ----------- |----------- | -| `systemVersionInfo` | `string` | The version of the response data (used for debugging). | | -| `resources` | [[]envoy.api.v2.Resource](../discovery.proto.sk#resource) | The response resources. These are typed resources that match the type url in the DeltaDiscoveryRequest. | | -| `removedResources` | `[]string` | Resources names of resources that have be deleted and to be removed from the xDS Client. Removed resources for missing resources can be ignored. | | -| `nonce` | `string` | The nonce provides a way for DeltaDiscoveryRequests to uniquely reference a DeltaDiscoveryResponse. The nonce is required. 
| | - - - - ---- -### Resource - - - -```yaml -"name": string -"version": string -"resource": .google.protobuf.Any - -``` - -| Field | Type | Description | Default | -| ----- | ---- | ----------- |----------- | -| `name` | `string` | The resource's name, to distinguish it from others of the same type of resource. | | -| `version` | `string` | The resource level version. It allows xDS to track the state of individual resources. | | -| `resource` | [.google.protobuf.Any](https://developers.google.com/protocol-buffers/docs/reference/csharp/class/google/protobuf/well-known-types/any) | The resource being tracked. | | - - - - - - - - diff --git a/test/mocks/api/doc/envoy/type/percent.proto.sk.md b/test/mocks/api/doc/envoy/type/percent.proto.sk.md deleted file mode 100644 index 145675fb4..000000000 --- a/test/mocks/api/doc/envoy/type/percent.proto.sk.md +++ /dev/null @@ -1,88 +0,0 @@ - ---- -title: "percent.proto" -weight: 5 ---- - - - - -### Package: `envoy.type` -#### Types: - - -- [Percent](#percent) -- [FractionalPercent](#fractionalpercent) -- [DenominatorType](#denominatortype) - - - - -##### Source File: `envoy/type/percent.proto` - - - - - ---- -### Percent - - -Identifies a percentage, in the range [0.0, 100.0]. - -```yaml -"value": float - -``` - -| Field | Type | Description | Default | -| ----- | ---- | ----------- |----------- | -| `value` | `float` | | | - - - - ---- -### FractionalPercent - - -A fractional percentage is used in cases in which for performance reasons performing floating -point to integer conversions during randomness calculations is undesirable. The message includes -both a numerator and denominator that together determine the final fractional value. - -* **Example**: 1/100 = 1%. -* **Example**: 3/10000 = 0.03%. 
- -```yaml -"numerator": int -"denominator": .envoy.type.FractionalPercent.DenominatorType - -``` - -| Field | Type | Description | Default | -| ----- | ---- | ----------- |----------- | -| `numerator` | `int` | Specifies the numerator. Defaults to 0. | | -| `denominator` | [.envoy.type.FractionalPercent.DenominatorType](../percent.proto.sk#denominatortype) | Specifies the denominator. If the denominator specified is less than the numerator, the final fractional percentage is capped at 1 (100%). | | - - - - ---- -### DenominatorType - - -Fraction percentages support several fixed denominator values. - -| Name | Description | -| ----- | ----------- | -| `HUNDRED` | 100. **Example**: 1/100 = 1%. | -| `TEN_THOUSAND` | 10,000. **Example**: 1/10000 = 0.01%. | -| `MILLION` | 1,000,000. **Example**: 1/1000000 = 0.0001%. | - - - - - - - - diff --git a/test/mocks/api/doc/github.com/solo-io/solo-kit/api/v1/metadata.proto.sk.md b/test/mocks/api/doc/github.com/solo-io/solo-kit/api/v1/metadata.proto.sk.md deleted file mode 100644 index bf8270335..000000000 --- a/test/mocks/api/doc/github.com/solo-io/solo-kit/api/v1/metadata.proto.sk.md +++ /dev/null @@ -1,57 +0,0 @@ - ---- -title: "metadata.proto" -weight: 5 ---- - - - - -### Package: `core.solo.io` -#### Types: - - -- [Metadata](#metadata) - - - - -##### Source File: [github.com/solo-io/solo-kit/api/v1/metadata.proto](https://github.com/solo-io/solo-kit/blob/master/api/v1/metadata.proto) - - - - - ---- -### Metadata - - -* -Metadata contains general properties of resources for purposes of versioning, annotating, and namespacing. - -```yaml -"name": string -"namespace": string -"cluster": string -"resourceVersion": string -"labels": map -"annotations": map - -``` - -| Field | Type | Description | Default | -| ----- | ---- | ----------- |----------- | -| `name` | `string` | Name of the resource. Names must be unique and follow the following syntax rules: One or more lowercase rfc1035/rfc1123 labels separated by '.' 
with a maximum length of 253 characters. | | -| `namespace` | `string` | Namespace is used for the namespacing of resources. | | -| `cluster` | `string` | Cluster indicates the cluster this resource belongs to Cluster is only applicable in certain contexts, e.g. Kubernetes An empty string here refers to the local cluster | | -| `resourceVersion` | `string` | An opaque value that represents the internal version of this object that can be used by clients to determine when objects have changed. | | -| `labels` | `map` | Map of string keys and values that can be used to organize and categorize (scope and select) objects. Some resources contain `selectors` which can be linked with other resources by their labels | | -| `annotations` | `map` | Annotations is an unstructured key value map stored with a resource that may be set by external tools to store and retrieve arbitrary metadata. | | - - - - - - - - diff --git a/test/mocks/api/doc/github.com/solo-io/solo-kit/api/v1/solo-kit.proto.sk.md b/test/mocks/api/doc/github.com/solo-io/solo-kit/api/v1/solo-kit.proto.sk.md deleted file mode 100644 index 7e9901caa..000000000 --- a/test/mocks/api/doc/github.com/solo-io/solo-kit/api/v1/solo-kit.proto.sk.md +++ /dev/null @@ -1,51 +0,0 @@ - ---- -title: "solo-kit.proto" -weight: 5 ---- - - - - -### Package: `core.solo.io` -#### Types: - - -- [Resource](#resource) - - - - -##### Source File: [github.com/solo-io/solo-kit/api/v1/solo-kit.proto](https://github.com/solo-io/solo-kit/blob/master/api/v1/solo-kit.proto) - - - - - ---- -### Resource - - - -```yaml -"shortName": string -"pluralName": string -"clusterScoped": bool -"skipDocsGen": bool - -``` - -| Field | Type | Description | Default | -| ----- | ---- | ----------- |----------- | -| `shortName` | `string` | becomes the kubernetes short name for the generated crd | | -| `pluralName` | `string` | becomes the kubernetes plural name for the generated crd | | -| `clusterScoped` | `bool` | the resource lives at the cluster level, 
namespace is ignored by the server | | -| `skipDocsGen` | `bool` | indicates whether documentation generation has to be skipped for the given resource, defaults to false | | - - - - - - - - diff --git a/test/mocks/api/doc/github.com/solo-io/solo-kit/api/v1/status.proto.sk.md b/test/mocks/api/doc/github.com/solo-io/solo-kit/api/v1/status.proto.sk.md deleted file mode 100644 index c10e8b579..000000000 --- a/test/mocks/api/doc/github.com/solo-io/solo-kit/api/v1/status.proto.sk.md +++ /dev/null @@ -1,69 +0,0 @@ - ---- -title: "status.proto" -weight: 5 ---- - - - - -### Package: `core.solo.io` -#### Types: - - -- [Status](#status) -- [State](#state) - - - - -##### Source File: [github.com/solo-io/solo-kit/api/v1/status.proto](https://github.com/solo-io/solo-kit/blob/master/api/v1/status.proto) - - - - - ---- -### Status - - -* -Status indicates whether a resource has been (in)validated by a reporter in the system. -Statuses are meant to be read-only by users - -```yaml -"state": .core.solo.io.Status.State -"reason": string -"reportedBy": string -"subresourceStatuses": map - -``` - -| Field | Type | Description | Default | -| ----- | ---- | ----------- |----------- | -| `state` | [.core.solo.io.Status.State](../status.proto.sk#state) | State is the enum indicating the state of the resource | | -| `reason` | `string` | Reason is a description of the error for Rejected resources. 
If the resource is pending or accepted, this field will be empty | | -| `reportedBy` | `string` | Reference to the reporter who wrote this status | | -| `subresourceStatuses` | `map` | Reference to statuses (by resource-ref string: "Kind.Namespace.Name") of subresources of the parent resource | | - - - - ---- -### State - - - -| Name | Description | -| ----- | ----------- | -| `Pending` | Pending status indicates the resource has not yet been validated | -| `Accepted` | Accepted indicates the resource has been validated | -| `Rejected` | Rejected indicates an invalid configuration by the user Rejected resources may be propagated to the xDS server depending on their severity | - - - - - - - - diff --git a/test/mocks/api/doc/gogoproto/gogo.proto.sk.md b/test/mocks/api/doc/gogoproto/gogo.proto.sk.md deleted file mode 100644 index 47c3fc0b8..000000000 --- a/test/mocks/api/doc/gogoproto/gogo.proto.sk.md +++ /dev/null @@ -1,51 +0,0 @@ - ---- -title: "gogo.proto" -weight: 5 ---- - - - - -### Package: `gogoproto` -Protocol Buffers for Go with Gadgets - -Copyright (c) 2013, The GoGo Authors. All rights reserved. -http://github.com/gogo/protobuf - -Redistribution and use in source and binary forms, with or without -modification, are permitted provided that the following conditions are -met: - - * Redistributions of source code must retain the above copyright -notice, this list of conditions and the following disclaimer. - * Redistributions in binary form must reproduce the above -copyright notice, this list of conditions and the following disclaimer -in the documentation and/or other materials provided with the -distribution. - -THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS -"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT -LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR -A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT -OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, -SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT -LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, -DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY -THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. - - - - -##### Source File: `gogoproto/gogo.proto` - - - - - - - - - diff --git a/test/mocks/api/doc/google/api/annotations.proto.sk.md b/test/mocks/api/doc/google/api/annotations.proto.sk.md deleted file mode 100644 index 88ca011ec..000000000 --- a/test/mocks/api/doc/google/api/annotations.proto.sk.md +++ /dev/null @@ -1,37 +0,0 @@ - ---- -title: "annotations.proto" -weight: 5 ---- - - - - -### Package: `google.api` -Copyright (c) 2015, Google Inc. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. 
- - - - -##### Source File: `google/api/annotations.proto` - - - - - - - - - diff --git a/test/mocks/api/doc/google/api/http.proto.sk.md b/test/mocks/api/doc/google/api/http.proto.sk.md deleted file mode 100644 index 051dfd26a..000000000 --- a/test/mocks/api/doc/google/api/http.proto.sk.md +++ /dev/null @@ -1,338 +0,0 @@ - ---- -title: "http.proto" -weight: 5 ---- - - - - -### Package: `google.api` -Copyright 2018 Google LLC - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. - - - -#### Types: - - -- [Http](#http) -- [HttpRule](#httprule) -- [CustomHttpPattern](#customhttppattern) - - - - -##### Source File: `google/api/http.proto` - - - - - ---- -### Http - - -Defines the HTTP configuration for an API service. It contains a list of -[HttpRule][google.api.HttpRule], each specifying the mapping of an RPC method -to one or more HTTP REST API methods. - -```yaml -"rules": []google.api.HttpRule -"fullyDecodeReservedExpansion": bool - -``` - -| Field | Type | Description | Default | -| ----- | ---- | ----------- |----------- | -| `rules` | [[]google.api.HttpRule](../http.proto.sk#httprule) | A list of HTTP configuration rules that apply to individual API methods. **NOTE:** All service configuration rules follow "last one wins" order. | | -| `fullyDecodeReservedExpansion` | `bool` | When set to true, URL path parmeters will be fully URI-decoded except in cases of single segment matches in reserved expansion, where "%2F" will be left encoded. 
The default behavior is to not decode RFC 6570 reserved characters in multi segment matches. | | - - - - ---- -### HttpRule - - -`HttpRule` defines the mapping of an RPC method to one or more HTTP -REST API methods. The mapping specifies how different portions of the RPC -request message are mapped to URL path, URL query parameters, and -HTTP request body. The mapping is typically specified as an -`google.api.http` annotation on the RPC method, -see "google/api/annotations.proto" for details. - -The mapping consists of a field specifying the path template and -method kind. The path template can refer to fields in the request -message, as in the example below which describes a REST GET -operation on a resource collection of messages: - - - service Messaging { - rpc GetMessage(GetMessageRequest) returns (Message) { - option (google.api.http).get = "/v1/messages/{message_id}/{sub.subfield}"; - } - } - message GetMessageRequest { - message SubMessage { - string subfield = 1; - } - string message_id = 1; // mapped to the URL - SubMessage sub = 2; // `sub.subfield` is url-mapped - } - message Message { - string text = 1; // content of the resource - } - -The same http annotation can alternatively be expressed inside the -`GRPC API Configuration` YAML file. - - http: - rules: - - selector: .Messaging.GetMessage - get: /v1/messages/{message_id}/{sub.subfield} - -This definition enables an automatic, bidrectional mapping of HTTP -JSON to RPC. Example: - -HTTP | RPC ------|----- -`GET /v1/messages/123456/foo` | `GetMessage(message_id: "123456" sub: SubMessage(subfield: "foo"))` - -In general, not only fields but also field paths can be referenced -from a path pattern. Fields mapped to the path pattern cannot be -repeated and must have a primitive (non-message) type. - -Any fields in the request message which are not bound by the path -pattern automatically become (optional) HTTP query -parameters. 
Assume the following definition of the request message: - - - service Messaging { - rpc GetMessage(GetMessageRequest) returns (Message) { - option (google.api.http).get = "/v1/messages/{message_id}"; - } - } - message GetMessageRequest { - message SubMessage { - string subfield = 1; - } - string message_id = 1; // mapped to the URL - int64 revision = 2; // becomes a parameter - SubMessage sub = 3; // `sub.subfield` becomes a parameter - } - - -This enables a HTTP JSON to RPC mapping as below: - -HTTP | RPC ------|----- -`GET /v1/messages/123456?revision=2&sub.subfield=foo` | `GetMessage(message_id: "123456" revision: 2 sub: SubMessage(subfield: "foo"))` - -Note that fields which are mapped to HTTP parameters must have a -primitive type or a repeated primitive type. Message types are not -allowed. In the case of a repeated type, the parameter can be -repeated in the URL, as in `...?param=A¶m=B`. - -For HTTP method kinds which allow a request body, the `body` field -specifies the mapping. Consider a REST update method on the -message resource collection: - - - service Messaging { - rpc UpdateMessage(UpdateMessageRequest) returns (Message) { - option (google.api.http) = { - put: "/v1/messages/{message_id}" - body: "message" - }; - } - } - message UpdateMessageRequest { - string message_id = 1; // mapped to the URL - Message message = 2; // mapped to the body - } - - -The following HTTP JSON to RPC mapping is enabled, where the -representation of the JSON in the request body is determined by -protos JSON encoding: - -HTTP | RPC ------|----- -`PUT /v1/messages/123456 { "text": "Hi!" }` | `UpdateMessage(message_id: "123456" message { text: "Hi!" })` - -The special name `*` can be used in the body mapping to define that -every field not bound by the path template should be mapped to the -request body. 
This enables the following alternative definition of -the update method: - - service Messaging { - rpc UpdateMessage(Message) returns (Message) { - option (google.api.http) = { - put: "/v1/messages/{message_id}" - body: "*" - }; - } - } - message Message { - string message_id = 1; - string text = 2; - } - - -The following HTTP JSON to RPC mapping is enabled: - -HTTP | RPC ------|----- -`PUT /v1/messages/123456 { "text": "Hi!" }` | `UpdateMessage(message_id: "123456" text: "Hi!")` - -Note that when using `*` in the body mapping, it is not possible to -have HTTP parameters, as all fields not bound by the path end in -the body. This makes this option more rarely used in practice of -defining REST APIs. The common usage of `*` is in custom methods -which don't use the URL at all for transferring data. - -It is possible to define multiple HTTP methods for one RPC by using -the `additional_bindings` option. Example: - - service Messaging { - rpc GetMessage(GetMessageRequest) returns (Message) { - option (google.api.http) = { - get: "/v1/messages/{message_id}" - additional_bindings { - get: "/v1/users/{user_id}/messages/{message_id}" - } - }; - } - } - message GetMessageRequest { - string message_id = 1; - string user_id = 2; - } - - -This enables the following two alternative HTTP JSON to RPC -mappings: - -HTTP | RPC ------|----- -`GET /v1/messages/123456` | `GetMessage(message_id: "123456")` -`GET /v1/users/me/messages/123456` | `GetMessage(user_id: "me" message_id: "123456")` - -# Rules for HTTP mapping - -The rules for mapping HTTP path, query parameters, and body fields -to the request message are as follows: - -1. The `body` field specifies either `*` or a field path, or is - omitted. If omitted, it indicates there is no HTTP request body. -2. Leaf fields (recursive expansion of nested messages in the - request) can be classified into three types: - (a) Matched in the URL template. 
- (b) Covered by body (if body is `*`, everything except (a) fields; - else everything under the body field) - (c) All other fields. -3. URL query parameters found in the HTTP request are mapped to (c) fields. -4. Any body sent with an HTTP request can contain only (b) fields. - -The syntax of the path template is as follows: - - Template = "/" Segments [ Verb ] ; - Segments = Segment { "/" Segment } ; - Segment = "*" | "**" | LITERAL | Variable ; - Variable = "{" FieldPath [ "=" Segments ] "}" ; - FieldPath = IDENT { "." IDENT } ; - Verb = ":" LITERAL ; - -The syntax `*` matches a single path segment. The syntax `**` matches zero -or more path segments, which must be the last part of the path except the -`Verb`. The syntax `LITERAL` matches literal text in the path. - -The syntax `Variable` matches part of the URL path as specified by its -template. A variable template must not contain other variables. If a variable -matches a single path segment, its template may be omitted, e.g. `{var}` -is equivalent to `{var=*}`. - -If a variable contains exactly one path segment, such as `"{var}"` or -`"{var=*}"`, when such a variable is expanded into a URL path, all characters -except `[-_.~0-9a-zA-Z]` are percent-encoded. Such variables show up in the -Discovery Document as `{var}`. - -If a variable contains one or more path segments, such as `"{var=foo/*}"` -or `"{var=**}"`, when such a variable is expanded into a URL path, all -characters except `[-_.~/0-9a-zA-Z]` are percent-encoded. Such variables -show up in the Discovery Document as `{+var}`. - -NOTE: While the single segment variable matches the semantics of -[RFC 6570](https://tools.ietf.org/html/rfc6570) Section 3.2.2 -Simple String Expansion, the multi segment variable **does not** match -RFC 6570 Reserved Expansion. The reason is that the Reserved Expansion -does not expand special characters like `?` and `#`, which would lead -to invalid URLs. 
- -NOTE: the field paths in variables and in the `body` must not refer to -repeated fields or map fields. - -```yaml -"selector": string -"get": string -"put": string -"post": string -"delete": string -"patch": string -"custom": .google.api.CustomHttpPattern -"body": string -"additionalBindings": []google.api.HttpRule - -``` - -| Field | Type | Description | Default | -| ----- | ---- | ----------- |----------- | -| `selector` | `string` | Selects methods to which this rule applies. Refer to [selector][google.api.DocumentationRule.selector] for syntax details. | | -| `get` | `string` | Used for listing and getting information about resources. | | -| `put` | `string` | Used for updating a resource. | | -| `post` | `string` | Used for creating a resource. | | -| `delete` | `string` | Used for deleting a resource. | | -| `patch` | `string` | Used for updating a resource. | | -| `custom` | [.google.api.CustomHttpPattern](../http.proto.sk#customhttppattern) | The custom pattern is used for specifying an HTTP method that is not included in the `pattern` field, such as HEAD, or "*" to leave the HTTP method unspecified for this rule. The wild-card rule is useful for services that provide content to Web (HTML) clients. | | -| `body` | `string` | The name of the request field whose value is mapped to the HTTP body, or `*` for mapping all fields not captured by the path pattern to the HTTP body. NOTE: the referred field must not be a repeated field and must be present at the top-level of request message type. | | -| `additionalBindings` | [[]google.api.HttpRule](../http.proto.sk#httprule) | Additional HTTP bindings for the selector. Nested bindings must not contain an `additional_bindings` field themselves (that is, the nesting may only be one level deep). | | - - - - ---- -### CustomHttpPattern - - -A custom pattern is used for defining custom HTTP verb. 
- -```yaml -"kind": string -"path": string - -``` - -| Field | Type | Description | Default | -| ----- | ---- | ----------- |----------- | -| `kind` | `string` | The name of this custom HTTP verb. | | -| `path` | `string` | The path matched by this custom verb. | | - - - - - - - - diff --git a/test/mocks/api/doc/google/protobuf/descriptor.proto.sk.md b/test/mocks/api/doc/google/protobuf/descriptor.proto.sk.md deleted file mode 100644 index e837aad9d..000000000 --- a/test/mocks/api/doc/google/protobuf/descriptor.proto.sk.md +++ /dev/null @@ -1,832 +0,0 @@ - ---- -title: "descriptor.proto" -weight: 5 ---- - - - - -### Package: `google.protobuf` -Protocol Buffers - Google's data interchange format -Copyright 2008 Google Inc. All rights reserved. -https://developers.google.com/protocol-buffers/ - -Redistribution and use in source and binary forms, with or without -modification, are permitted provided that the following conditions are -met: - - * Redistributions of source code must retain the above copyright -notice, this list of conditions and the following disclaimer. - * Redistributions in binary form must reproduce the above -copyright notice, this list of conditions and the following disclaimer -in the documentation and/or other materials provided with the -distribution. - * Neither the name of Google Inc. nor the names of its -contributors may be used to endorse or promote products derived from -this software without specific prior written permission. - -THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS -"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT -LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR -A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT -OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, -SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT -LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, -DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY -THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. -Author: kenton@google.com (Kenton Varda) - Based on original Protocol Buffers design by - Sanjay Ghemawat, Jeff Dean, and others. - -The messages in this file describe the definitions found in .proto files. -A valid .proto file can be translated directly to a FileDescriptorProto -without any other information (e.g. without reading its imports). - - - -#### Types: - - -- [FileDescriptorSet](#filedescriptorset) -- [FileDescriptorProto](#filedescriptorproto) -- [DescriptorProto](#descriptorproto) -- [ExtensionRange](#extensionrange) -- [ReservedRange](#reservedrange) -- [FieldDescriptorProto](#fielddescriptorproto) -- [Type](#type) -- [Label](#label) -- [OneofDescriptorProto](#oneofdescriptorproto) -- [EnumDescriptorProto](#enumdescriptorproto) -- [EnumValueDescriptorProto](#enumvaluedescriptorproto) -- [ServiceDescriptorProto](#servicedescriptorproto) -- [MethodDescriptorProto](#methoddescriptorproto) -- [FileOptions](#fileoptions) -- [OptimizeMode](#optimizemode) -- [MessageOptions](#messageoptions) -- [FieldOptions](#fieldoptions) -- [CType](#ctype) -- [JSType](#jstype) -- [OneofOptions](#oneofoptions) -- [EnumOptions](#enumoptions) -- [EnumValueOptions](#enumvalueoptions) -- [ServiceOptions](#serviceoptions) -- [MethodOptions](#methodoptions) -- [IdempotencyLevel](#idempotencylevel) -- [UninterpretedOption](#uninterpretedoption) -- [NamePart](#namepart) -- [SourceCodeInfo](#sourcecodeinfo) -- [Location](#location) -- 
[GeneratedCodeInfo](#generatedcodeinfo) -- [Annotation](#annotation) - - - - -##### Source File: `google/protobuf/descriptor.proto` - - - - - ---- -### FileDescriptorSet - - -The protocol compiler can output a FileDescriptorSet containing the .proto -files it parses. - -```yaml -"file": []google.protobuf.FileDescriptorProto - -``` - -| Field | Type | Description | Default | -| ----- | ---- | ----------- |----------- | -| `file` | [[]google.protobuf.FileDescriptorProto](../descriptor.proto.sk#filedescriptorproto) | | | - - - - ---- -### FileDescriptorProto - - -Describes a complete .proto file. - -```yaml -"name": string -"package": string -"dependency": []string -"publicDependency": []int -"weakDependency": []int -"messageType": []google.protobuf.DescriptorProto -"enumType": []google.protobuf.EnumDescriptorProto -"service": []google.protobuf.ServiceDescriptorProto -"extension": []google.protobuf.FieldDescriptorProto -"options": .google.protobuf.FileOptions -"sourceCodeInfo": .google.protobuf.SourceCodeInfo -"syntax": string - -``` - -| Field | Type | Description | Default | -| ----- | ---- | ----------- |----------- | -| `name` | `string` | | | -| `package` | `string` | | | -| `dependency` | `[]string` | Names of files imported by this file. | | -| `publicDependency` | `[]int` | Indexes of the public imported files in the dependency list above. | | -| `weakDependency` | `[]int` | Indexes of the weak imported files in the dependency list. For Google-internal migration only. Do not use. | | -| `messageType` | [[]google.protobuf.DescriptorProto](../descriptor.proto.sk#descriptorproto) | All top-level definitions in this file. 
| | -| `enumType` | [[]google.protobuf.EnumDescriptorProto](../descriptor.proto.sk#enumdescriptorproto) | | | -| `service` | [[]google.protobuf.ServiceDescriptorProto](../descriptor.proto.sk#servicedescriptorproto) | | | -| `extension` | [[]google.protobuf.FieldDescriptorProto](../descriptor.proto.sk#fielddescriptorproto) | | | -| `options` | [.google.protobuf.FileOptions](https://developers.google.com/protocol-buffers/docs/reference/csharp/class/google/protobuf/well-known-types/file-options) | | | -| `sourceCodeInfo` | [.google.protobuf.SourceCodeInfo](https://developers.google.com/protocol-buffers/docs/reference/csharp/class/google/protobuf/well-known-types/source-code-info) | This field contains optional information about the original source code. You may safely remove this entire field without harming runtime functionality of the descriptors -- the information is needed only by development tools. | | -| `syntax` | `string` | The syntax of the proto file. The supported values are "proto2" and "proto3". | | - - - - ---- -### DescriptorProto - - -Describes a message type. 
- -```yaml -"name": string -"field": []google.protobuf.FieldDescriptorProto -"extension": []google.protobuf.FieldDescriptorProto -"nestedType": []google.protobuf.DescriptorProto -"enumType": []google.protobuf.EnumDescriptorProto -"extensionRange": []google.protobuf.DescriptorProto.ExtensionRange -"oneofDecl": []google.protobuf.OneofDescriptorProto -"options": .google.protobuf.MessageOptions -"reservedRange": []google.protobuf.DescriptorProto.ReservedRange -"reservedName": []string - -``` - -| Field | Type | Description | Default | -| ----- | ---- | ----------- |----------- | -| `name` | `string` | | | -| `field` | [[]google.protobuf.FieldDescriptorProto](../descriptor.proto.sk#fielddescriptorproto) | | | -| `extension` | [[]google.protobuf.FieldDescriptorProto](../descriptor.proto.sk#fielddescriptorproto) | | | -| `nestedType` | [[]google.protobuf.DescriptorProto](../descriptor.proto.sk#descriptorproto) | | | -| `enumType` | [[]google.protobuf.EnumDescriptorProto](../descriptor.proto.sk#enumdescriptorproto) | | | -| `extensionRange` | [[]google.protobuf.DescriptorProto.ExtensionRange](../descriptor.proto.sk#extensionrange) | | | -| `oneofDecl` | [[]google.protobuf.OneofDescriptorProto](../descriptor.proto.sk#oneofdescriptorproto) | | | -| `options` | [.google.protobuf.MessageOptions](https://developers.google.com/protocol-buffers/docs/reference/csharp/class/google/protobuf/well-known-types/message-options) | | | -| `reservedRange` | [[]google.protobuf.DescriptorProto.ReservedRange](../descriptor.proto.sk#reservedrange) | | | -| `reservedName` | `[]string` | Reserved field names, which may not be used by fields in the same message. A given name may only be reserved once. | | - - - - ---- -### ExtensionRange - - - -```yaml -"start": int -"end": int - -``` - -| Field | Type | Description | Default | -| ----- | ---- | ----------- |----------- | -| `start` | `int` | | | -| `end` | `int` | | | - - - - ---- -### ReservedRange - - -Range of reserved tag numbers. 
Reserved tag numbers may not be used by -fields or extension ranges in the same message. Reserved ranges may -not overlap. - -```yaml -"start": int -"end": int - -``` - -| Field | Type | Description | Default | -| ----- | ---- | ----------- |----------- | -| `start` | `int` | | | -| `end` | `int` | | | - - - - ---- -### FieldDescriptorProto - - -Describes a field within a message. - -```yaml -"name": string -"number": int -"label": .google.protobuf.FieldDescriptorProto.Label -"type": .google.protobuf.FieldDescriptorProto.Type -"typeName": string -"extendee": string -"defaultValue": string -"oneofIndex": int -"jsonName": string -"options": .google.protobuf.FieldOptions - -``` - -| Field | Type | Description | Default | -| ----- | ---- | ----------- |----------- | -| `name` | `string` | | | -| `number` | `int` | | | -| `label` | [.google.protobuf.FieldDescriptorProto.Label](https://developers.google.com/protocol-buffers/docs/reference/csharp/class/google/protobuf/well-known-types/field-descriptor-proto.-label) | | | -| `type` | [.google.protobuf.FieldDescriptorProto.Type](https://developers.google.com/protocol-buffers/docs/reference/csharp/class/google/protobuf/well-known-types/field-descriptor-proto.-type) | If type_name is set, this need not be set. If both this and type_name are set, this must be one of TYPE_ENUM, TYPE_MESSAGE or TYPE_GROUP. | | -| `typeName` | `string` | For message and enum types, this is the name of the type. If the name starts with a '.', it is fully-qualified. Otherwise, C++-like scoping rules are used to find the type (i.e. first the nested types within this message are searched, then within the parent, on up to the root namespace). | | -| `extendee` | `string` | For extensions, this is the name of the type being extended. It is resolved in the same manner as type_name. | | -| `defaultValue` | `string` | For numeric types, contains the original text representation of the value. For booleans, "true" or "false". 
For strings, contains the default text contents (not escaped in any way). For bytes, contains the C escaped value. All bytes >= 128 are escaped. TODO(kenton): Base-64 encode? | | -| `oneofIndex` | `int` | If set, gives the index of a oneof in the containing type's oneof_decl list. This field is a member of that oneof. | | -| `jsonName` | `string` | JSON name of this field. The value is set by protocol compiler. If the user has set a "json_name" option on this field, that option's value will be used. Otherwise, it's deduced from the field's name by converting it to camelCase. | | -| `options` | [.google.protobuf.FieldOptions](https://developers.google.com/protocol-buffers/docs/reference/csharp/class/google/protobuf/well-known-types/field-options) | | | - - - - ---- -### Type - - - -| Name | Description | -| ----- | ----------- | -| `TYPE_DOUBLE` | 0 is reserved for errors. Order is weird for historical reasons. | -| `TYPE_FLOAT` | | -| `TYPE_INT64` | Not ZigZag encoded. Negative numbers take 10 bytes. Use TYPE_SINT64 if negative values are likely. | -| `TYPE_UINT64` | | -| `TYPE_INT32` | Not ZigZag encoded. Negative numbers take 10 bytes. Use TYPE_SINT32 if negative values are likely. | -| `TYPE_FIXED64` | | -| `TYPE_FIXED32` | | -| `TYPE_BOOL` | | -| `TYPE_STRING` | | -| `TYPE_GROUP` | Tag-delimited aggregate. Group type is deprecated and not supported in proto3. However, Proto3 implementations should still be able to parse the group wire format and treat group fields as unknown fields. | -| `TYPE_MESSAGE` | | -| `TYPE_BYTES` | New in version 2. | -| `TYPE_UINT32` | | -| `TYPE_ENUM` | | -| `TYPE_SFIXED32` | | -| `TYPE_SFIXED64` | | -| `TYPE_SINT32` | | -| `TYPE_SINT64` | | - - - - ---- -### Label - - - -| Name | Description | -| ----- | ----------- | -| `LABEL_OPTIONAL` | 0 is reserved for errors | -| `LABEL_REQUIRED` | | -| `LABEL_REPEATED` | | - - - - ---- -### OneofDescriptorProto - - -Describes a oneof. 
- -```yaml -"name": string -"options": .google.protobuf.OneofOptions - -``` - -| Field | Type | Description | Default | -| ----- | ---- | ----------- |----------- | -| `name` | `string` | | | -| `options` | [.google.protobuf.OneofOptions](https://developers.google.com/protocol-buffers/docs/reference/csharp/class/google/protobuf/well-known-types/oneof-options) | | | - - - - ---- -### EnumDescriptorProto - - -Describes an enum type. - -```yaml -"name": string -"value": []google.protobuf.EnumValueDescriptorProto -"options": .google.protobuf.EnumOptions - -``` - -| Field | Type | Description | Default | -| ----- | ---- | ----------- |----------- | -| `name` | `string` | | | -| `value` | [[]google.protobuf.EnumValueDescriptorProto](../descriptor.proto.sk#enumvaluedescriptorproto) | | | -| `options` | [.google.protobuf.EnumOptions](https://developers.google.com/protocol-buffers/docs/reference/csharp/class/google/protobuf/well-known-types/enum-options) | | | - - - - ---- -### EnumValueDescriptorProto - - -Describes a value within an enum. - -```yaml -"name": string -"number": int -"options": .google.protobuf.EnumValueOptions - -``` - -| Field | Type | Description | Default | -| ----- | ---- | ----------- |----------- | -| `name` | `string` | | | -| `number` | `int` | | | -| `options` | [.google.protobuf.EnumValueOptions](https://developers.google.com/protocol-buffers/docs/reference/csharp/class/google/protobuf/well-known-types/enum-value-options) | | | - - - - ---- -### ServiceDescriptorProto - - -Describes a service. 
- -```yaml -"name": string -"method": []google.protobuf.MethodDescriptorProto -"options": .google.protobuf.ServiceOptions - -``` - -| Field | Type | Description | Default | -| ----- | ---- | ----------- |----------- | -| `name` | `string` | | | -| `method` | [[]google.protobuf.MethodDescriptorProto](../descriptor.proto.sk#methoddescriptorproto) | | | -| `options` | [.google.protobuf.ServiceOptions](https://developers.google.com/protocol-buffers/docs/reference/csharp/class/google/protobuf/well-known-types/service-options) | | | - - - - ---- -### MethodDescriptorProto - - -Describes a method of a service. - -```yaml -"name": string -"inputType": string -"outputType": string -"options": .google.protobuf.MethodOptions -"clientStreaming": bool -"serverStreaming": bool - -``` - -| Field | Type | Description | Default | -| ----- | ---- | ----------- |----------- | -| `name` | `string` | | | -| `inputType` | `string` | Input and output type names. These are resolved in the same way as FieldDescriptorProto.type_name, but must refer to a message type. 
| | -| `outputType` | `string` | | | -| `options` | [.google.protobuf.MethodOptions](https://developers.google.com/protocol-buffers/docs/reference/csharp/class/google/protobuf/well-known-types/method-options) | | | -| `clientStreaming` | `bool` | Identifies if client streams multiple client messages | Default: false | -| `serverStreaming` | `bool` | Identifies if server streams multiple server messages | Default: false | - - - - ---- -### FileOptions - - - -```yaml -"javaPackage": string -"javaOuterClassname": string -"javaMultipleFiles": bool -"javaGenerateEqualsAndHash": bool -"javaStringCheckUtf8": bool -"optimizeFor": .google.protobuf.FileOptions.OptimizeMode -"goPackage": string -"ccGenericServices": bool -"javaGenericServices": bool -"pyGenericServices": bool -"deprecated": bool -"ccEnableArenas": bool -"objcClassPrefix": string -"csharpNamespace": string -"swiftPrefix": string -"phpClassPrefix": string -"uninterpretedOption": []google.protobuf.UninterpretedOption - -``` - -| Field | Type | Description | Default | -| ----- | ---- | ----------- |----------- | -| `javaPackage` | `string` | Sets the Java package where classes generated from this .proto will be placed. By default, the proto package is used, but this is often inappropriate because proto packages do not normally start with backwards domain names. | | -| `javaOuterClassname` | `string` | If set, all the classes from the .proto file are wrapped in a single outer class with the given name. This applies to both Proto1 (equivalent to the old "--one_java_file" option) and Proto2 (where a .proto always translates to a single class, but you may want to explicitly choose the class name). | | -| `javaMultipleFiles` | `bool` | If set true, then the Java code generator will generate a separate .java file for each top-level message, enum, and service defined in the .proto file. Thus, these types will *not* be nested inside the outer class named by java_outer_classname. 
However, the outer class will still be generated to contain the file's getDescriptor() method as well as any top-level extensions defined in the file. | Default: false | -| `javaGenerateEqualsAndHash` | `bool` | This option does nothing. | | -| `javaStringCheckUtf8` | `bool` | If set true, then the Java2 code generator will generate code that throws an exception whenever an attempt is made to assign a non-UTF-8 byte sequence to a string field. Message reflection will do the same. However, an extension field still accepts non-UTF-8 byte sequences. This option has no effect on when used with the lite runtime. | Default: false | -| `optimizeFor` | [.google.protobuf.FileOptions.OptimizeMode](https://developers.google.com/protocol-buffers/docs/reference/csharp/class/google/protobuf/well-known-types/file-options.-optimize-mode) | | Default: SPEED | -| `goPackage` | `string` | Sets the Go package where structs generated from this .proto will be placed. If omitted, the Go package will be derived from the following: - The basename of the package import path, if provided. - Otherwise, the package statement in the .proto file, if present. - Otherwise, the basename of the .proto file, without extension. | | -| `ccGenericServices` | `bool` | Should generic services be generated in each language? "Generic" services are not specific to any particular RPC system. They are generated by the main code generators in each language (without additional plugins). Generic services were the only kind of service generation supported by early versions of google.protobuf. Generic services are now considered deprecated in favor of using plugins that generate code specific to your particular RPC system. Therefore, these default to false. Old code which depends on generic services should explicitly set them to true. | Default: false | -| `javaGenericServices` | `bool` | | Default: false | -| `pyGenericServices` | `bool` | | Default: false | -| `deprecated` | `bool` | Is this file deprecated? 
Depending on the target platform, this can emit Deprecated annotations for everything in the file, or it will be completely ignored; in the very least, this is a formalization for deprecating files. | Default: false | -| `ccEnableArenas` | `bool` | Enables the use of arenas for the proto messages in this file. This applies only to generated classes for C++. | Default: false | -| `objcClassPrefix` | `string` | Sets the objective c class prefix which is prepended to all objective c generated classes from this .proto. There is no default. | | -| `csharpNamespace` | `string` | Namespace for generated classes; defaults to the package. | | -| `swiftPrefix` | `string` | By default Swift generators will take the proto package and CamelCase it replacing '.' with underscore and use that to prefix the types/symbols defined. When this options is provided, they will use this value instead to prefix the types/symbols defined. | | -| `phpClassPrefix` | `string` | Sets the php class prefix which is prepended to all php generated classes from this .proto. Default is empty. | | -| `uninterpretedOption` | [[]google.protobuf.UninterpretedOption](../descriptor.proto.sk#uninterpretedoption) | The parser stores options it doesn't recognize here. See above. | | - - - - ---- -### OptimizeMode - - -Generated classes can be optimized for speed or code size. - -| Name | Description | -| ----- | ----------- | -| `SPEED` | | -| `CODE_SIZE` | etc. | -| `LITE_RUNTIME` | | - - - - ---- -### MessageOptions - - - -```yaml -"messageSetWireFormat": bool -"noStandardDescriptorAccessor": bool -"deprecated": bool -"mapEntry": bool -"uninterpretedOption": []google.protobuf.UninterpretedOption - -``` - -| Field | Type | Description | Default | -| ----- | ---- | ----------- |----------- | -| `messageSetWireFormat` | `bool` | Set true to use the old proto1 MessageSet wire format for extensions. This is provided for backwards-compatibility with the MessageSet wire format. 
You should not use this for any other reason: It's less efficient, has fewer features, and is more complicated. The message must be defined exactly as follows: message Foo { option message_set_wire_format = true; extensions 4 to max; } Note that the message cannot have any defined fields; MessageSets only have extensions. All extensions of your type must be singular messages; e.g. they cannot be int32s, enums, or repeated messages. Because this is an option, the above two restrictions are not enforced by the protocol compiler. | Default: false | -| `noStandardDescriptorAccessor` | `bool` | Disables the generation of the standard "descriptor()" accessor, which can conflict with a field of the same name. This is meant to make migration from proto1 easier; new code should avoid fields named "descriptor". | Default: false | -| `deprecated` | `bool` | Is this message deprecated? Depending on the target platform, this can emit Deprecated annotations for the message, or it will be completely ignored; in the very least, this is a formalization for deprecating messages. | Default: false | -| `mapEntry` | `bool` | Whether the message is an automatically generated map entry type for the maps field. For maps fields: map map_field = 1; The parsed descriptor looks like: message MapFieldEntry { option map_entry = true; optional KeyType key = 1; optional ValueType value = 2; } repeated MapFieldEntry map_field = 1; Implementations may choose not to generate the map_entry=true message, but use a native map in the target language to hold the keys and values. The reflection APIs in such implementions still need to work as if the field is a repeated message field. NOTE: Do not set the option in .proto files. Always use the maps syntax instead. The option should only be implicitly set by the proto compiler parser. | | -| `uninterpretedOption` | [[]google.protobuf.UninterpretedOption](../descriptor.proto.sk#uninterpretedoption) | The parser stores options it doesn't recognize here. 
See above. | | - - - - ---- -### FieldOptions - - - -```yaml -"ctype": .google.protobuf.FieldOptions.CType -"packed": bool -"jstype": .google.protobuf.FieldOptions.JSType -"lazy": bool -"deprecated": bool -"weak": bool -"uninterpretedOption": []google.protobuf.UninterpretedOption - -``` - -| Field | Type | Description | Default | -| ----- | ---- | ----------- |----------- | -| `ctype` | [.google.protobuf.FieldOptions.CType](https://developers.google.com/protocol-buffers/docs/reference/csharp/class/google/protobuf/well-known-types/field-options.c-type) | The ctype option instructs the C++ code generator to use a different representation of the field than it normally would. See the specific options below. This option is not yet implemented in the open source release -- sorry, we'll try to include it in a future version! | Default: STRING | -| `packed` | `bool` | The packed option can be enabled for repeated primitive fields to enable a more efficient representation on the wire. Rather than repeatedly writing the tag and type for each element, the entire array is encoded as a single length-delimited blob. In proto3, only explicit setting it to false will avoid using packed encoding. | | -| `jstype` | [.google.protobuf.FieldOptions.JSType](https://developers.google.com/protocol-buffers/docs/reference/csharp/class/google/protobuf/well-known-types/field-options.js-type) | The jstype option determines the JavaScript type used for values of the field. The option is permitted only for 64 bit integral and fixed types (int64, uint64, sint64, fixed64, sfixed64). By default these types are represented as JavaScript strings. This avoids loss of precision that can happen when a large value is converted to a floating point JavaScript numbers. Specifying JS_NUMBER for the jstype causes the generated JavaScript code to use the JavaScript "number" type instead of strings. This option is an enum to permit additional types to be added, e.g. goog.math.Integer. 
| Default: JS_NORMAL | -| `lazy` | `bool` | Should this field be parsed lazily? Lazy applies only to message-type fields. It means that when the outer message is initially parsed, the inner message's contents will not be parsed but instead stored in encoded form. The inner message will actually be parsed when it is first accessed. This is only a hint. Implementations are free to choose whether to use eager or lazy parsing regardless of the value of this option. However, setting this option true suggests that the protocol author believes that using lazy parsing on this field is worth the additional bookkeeping overhead typically needed to implement it. This option does not affect the public interface of any generated code; all method signatures remain the same. Furthermore, thread-safety of the interface is not affected by this option; const methods remain safe to call from multiple threads concurrently, while non-const methods continue to require exclusive access. Note that implementations may choose not to check required fields within a lazy sub-message. That is, calling IsInitialized() on the outer message may return true even if the inner message has missing required fields. This is necessary because otherwise the inner message would have to be parsed in order to perform the check, defeating the purpose of lazy parsing. An implementation which chooses not to check required fields must be consistent about it. That is, for any particular sub-message, the implementation must either *always* check its required fields, or *never* check its required fields, regardless of whether or not the message has been parsed. | Default: false | -| `deprecated` | `bool` | Is this field deprecated? Depending on the target platform, this can emit Deprecated annotations for accessors, or it will be completely ignored; in the very least, this is a formalization for deprecating fields. | Default: false | -| `weak` | `bool` | For Google-internal migration only. Do not use. 
| Default: false | -| `uninterpretedOption` | [[]google.protobuf.UninterpretedOption](../descriptor.proto.sk#uninterpretedoption) | The parser stores options it doesn't recognize here. See above. | | - - - - ---- -### CType - - - -| Name | Description | -| ----- | ----------- | -| `STRING` | Default mode. | -| `CORD` | | -| `STRING_PIECE` | | - - - - ---- -### JSType - - - -| Name | Description | -| ----- | ----------- | -| `JS_NORMAL` | Use the default type. | -| `JS_STRING` | Use JavaScript strings. | -| `JS_NUMBER` | Use JavaScript numbers. | - - - - ---- -### OneofOptions - - - -```yaml -"uninterpretedOption": []google.protobuf.UninterpretedOption - -``` - -| Field | Type | Description | Default | -| ----- | ---- | ----------- |----------- | -| `uninterpretedOption` | [[]google.protobuf.UninterpretedOption](../descriptor.proto.sk#uninterpretedoption) | The parser stores options it doesn't recognize here. See above. | | - - - - ---- -### EnumOptions - - - -```yaml -"allowAlias": bool -"deprecated": bool -"uninterpretedOption": []google.protobuf.UninterpretedOption - -``` - -| Field | Type | Description | Default | -| ----- | ---- | ----------- |----------- | -| `allowAlias` | `bool` | Set this option to true to allow mapping different tag names to the same value. | | -| `deprecated` | `bool` | Is this enum deprecated? Depending on the target platform, this can emit Deprecated annotations for the enum, or it will be completely ignored; in the very least, this is a formalization for deprecating enums. | Default: false | -| `uninterpretedOption` | [[]google.protobuf.UninterpretedOption](../descriptor.proto.sk#uninterpretedoption) | The parser stores options it doesn't recognize here. See above. 
| | - - - - ---- -### EnumValueOptions - - - -```yaml -"deprecated": bool -"uninterpretedOption": []google.protobuf.UninterpretedOption - -``` - -| Field | Type | Description | Default | -| ----- | ---- | ----------- |----------- | -| `deprecated` | `bool` | Is this enum value deprecated? Depending on the target platform, this can emit Deprecated annotations for the enum value, or it will be completely ignored; in the very least, this is a formalization for deprecating enum values. | Default: false | -| `uninterpretedOption` | [[]google.protobuf.UninterpretedOption](../descriptor.proto.sk#uninterpretedoption) | The parser stores options it doesn't recognize here. See above. | | - - - - ---- -### ServiceOptions - - - -```yaml -"deprecated": bool -"uninterpretedOption": []google.protobuf.UninterpretedOption - -``` - -| Field | Type | Description | Default | -| ----- | ---- | ----------- |----------- | -| `deprecated` | `bool` | Is this service deprecated? Depending on the target platform, this can emit Deprecated annotations for the service, or it will be completely ignored; in the very least, this is a formalization for deprecating services. | Default: false | -| `uninterpretedOption` | [[]google.protobuf.UninterpretedOption](../descriptor.proto.sk#uninterpretedoption) | The parser stores options it doesn't recognize here. See above. | | - - - - ---- -### MethodOptions - - - -```yaml -"deprecated": bool -"idempotencyLevel": .google.protobuf.MethodOptions.IdempotencyLevel -"uninterpretedOption": []google.protobuf.UninterpretedOption - -``` - -| Field | Type | Description | Default | -| ----- | ---- | ----------- |----------- | -| `deprecated` | `bool` | Is this method deprecated? Depending on the target platform, this can emit Deprecated annotations for the method, or it will be completely ignored; in the very least, this is a formalization for deprecating methods. 
| Default: false | -| `idempotencyLevel` | [.google.protobuf.MethodOptions.IdempotencyLevel](https://developers.google.com/protocol-buffers/docs/reference/csharp/class/google/protobuf/well-known-types/method-options.-idempotency-level) | | Default: IDEMPOTENCY_UNKNOWN | -| `uninterpretedOption` | [[]google.protobuf.UninterpretedOption](../descriptor.proto.sk#uninterpretedoption) | The parser stores options it doesn't recognize here. See above. | | - - - - ---- -### IdempotencyLevel - - -Is this method side-effect-free (or safe in HTTP parlance), or idempotent, -or neither? HTTP based RPC implementation may choose GET verb for safe -methods, and PUT verb for idempotent methods instead of the default POST. - -| Name | Description | -| ----- | ----------- | -| `IDEMPOTENCY_UNKNOWN` | | -| `NO_SIDE_EFFECTS` | | -| `IDEMPOTENT` | | - - - - ---- -### UninterpretedOption - - -A message representing a option the parser does not recognize. This only -appears in options protos created by the compiler::Parser class. -DescriptorPool resolves these when building Descriptor objects. Therefore, -options protos in descriptor objects (e.g. returned by Descriptor::options(), -or produced by Descriptor::CopyTo()) will never have UninterpretedOptions -in them. - -```yaml -"name": []google.protobuf.UninterpretedOption.NamePart -"identifierValue": string -"positiveIntValue": int -"negativeIntValue": int -"doubleValue": float -"stringValue": bytes -"aggregateValue": string - -``` - -| Field | Type | Description | Default | -| ----- | ---- | ----------- |----------- | -| `name` | [[]google.protobuf.UninterpretedOption.NamePart](../descriptor.proto.sk#namepart) | | | -| `identifierValue` | `string` | The value of the uninterpreted option, in whatever type the tokenizer identified it as during parsing. Exactly one of these should be set. 
| | -| `positiveIntValue` | `int` | | | -| `negativeIntValue` | `int` | | | -| `doubleValue` | `float` | | | -| `stringValue` | `bytes` | | | -| `aggregateValue` | `string` | | | - - - - ---- -### NamePart - - -The name of the uninterpreted option. Each string represents a segment in -a dot-separated name. is_extension is true iff a segment represents an -extension (denoted with parentheses in options specs in .proto files). -E.g.,{ ["foo", false], ["bar.baz", true], ["qux", false] } represents -"foo.(bar.baz).qux". - -```yaml -"namePart": string -"isExtension": bool - -``` - -| Field | Type | Description | Default | -| ----- | ---- | ----------- |----------- | -| `namePart` | `string` | | | -| `isExtension` | `bool` | | | - - - - ---- -### SourceCodeInfo - - -Encapsulates information about the original source file from which a -FileDescriptorProto was generated. - -```yaml -"location": []google.protobuf.SourceCodeInfo.Location - -``` - -| Field | Type | Description | Default | -| ----- | ---- | ----------- |----------- | -| `location` | [[]google.protobuf.SourceCodeInfo.Location](../descriptor.proto.sk#location) | A Location identifies a piece of source code in a .proto file which corresponds to a particular definition. This information is intended to be useful to IDEs, code indexers, documentation generators, and similar tools. For example, say we have a file like: message Foo { optional string foo = 1; } Let's look at just the field definition: optional string foo = 1; ^ ^^ ^^ ^ ^^^ a bc de f ghi We have the following locations: span path represents [a,i) [ 4, 0, 2, 0 ] The whole field definition. [a,b) [ 4, 0, 2, 0, 4 ] The label (optional). [c,d) [ 4, 0, 2, 0, 5 ] The type (string). [e,f) [ 4, 0, 2, 0, 1 ] The name (foo). [g,h) [ 4, 0, 2, 0, 3 ] The number (1). Notes: - A location may refer to a repeated field itself (i.e. not to any particular index within it). This is used whenever a set of elements are logically enclosed in a single code segment. 
For example, an entire extend block (possibly containing multiple extension definitions) will have an outer location whose path refers to the "extensions" repeated field without an index. - Multiple locations may have the same path. This happens when a single logical declaration is spread out across multiple places. The most obvious example is the "extend" block again -- there may be multiple extend blocks in the same scope, each of which will have the same path. - A location's span is not always a subset of its parent's span. For example, the "extendee" of an extension declaration appears at the beginning of the "extend" block and is shared by all extensions within the block. - Just because a location's span is a subset of some other location's span does not mean that it is a descendent. For example, a "group" defines both a type and a field in a single declaration. Thus, the locations corresponding to the type and field and their components will overlap. - Code which tries to interpret locations should probably be designed to ignore those that it doesn't understand, as more types of locations could be recorded in the future. | | - - - - ---- -### Location - - - -```yaml -"path": []int -"span": []int -"leadingComments": string -"trailingComments": string -"leadingDetachedComments": []string - -``` - -| Field | Type | Description | Default | -| ----- | ---- | ----------- |----------- | -| `path` | `[]int` | Identifies which part of the FileDescriptorProto was defined at this location. Each element is a field number or an index. They form a path from the root FileDescriptorProto to the place where the definition. 
For example, this path: [ 4, 3, 2, 7, 1 ] refers to: file.message_type(3) // 4, 3 .field(7) // 2, 7 .name() // 1 This is because FileDescriptorProto.message_type has field number 4: repeated DescriptorProto message_type = 4; and DescriptorProto.field has field number 2: repeated FieldDescriptorProto field = 2; and FieldDescriptorProto.name has field number 1: optional string name = 1; Thus, the above path gives the location of a field name. If we removed the last element: [ 4, 3, 2, 7 ] this path refers to the whole field declaration (from the beginning of the label to the terminating semicolon). | | -| `span` | `[]int` | Always has exactly three or four elements: start line, start column, end line (optional, otherwise assumed same as start line), end column. These are packed into a single field for efficiency. Note that line and column numbers are zero-based -- typically you will want to add 1 to each before displaying to a user. | | -| `leadingComments` | `string` | If this SourceCodeInfo represents a complete declaration, these are any comments appearing before and after the declaration which appear to be attached to the declaration. A series of line comments appearing on consecutive lines, with no other tokens appearing on those lines, will be treated as a single comment. leading_detached_comments will keep paragraphs of comments that appear before (but not connected to) the current element. Each paragraph, separated by empty lines, will be one comment element in the repeated field. Only the comment content is provided; comment markers (e.g. //) are stripped out. For block comments, leading whitespace and an asterisk will be stripped from the beginning of each line other than the first. Newlines are included in the output. Examples: optional int32 foo = 1; // Comment attached to foo. // Comment attached to bar. optional int32 bar = 2; optional string baz = 3; // Comment attached to baz. // Another line attached to baz. // Comment attached to qux. 
// // Another line attached to qux. optional double qux = 4; // Detached comment for corge. This is not leading or trailing comments // to qux or corge because there are blank lines separating it from // both. // Detached comment for corge paragraph 2. optional string corge = 5; /* Block comment attached * to corge. Leading asterisks * will be removed. */ /* Block comment attached to * grault. */ optional int32 grault = 6; // ignored detached comments. | | -| `trailingComments` | `string` | | | -| `leadingDetachedComments` | `[]string` | | | - - - - ---- -### GeneratedCodeInfo - - -Describes the relationship between generated code and its original source -file. A GeneratedCodeInfo message is associated with only one generated -source file, but may contain references to different source .proto files. - -```yaml -"annotation": []google.protobuf.GeneratedCodeInfo.Annotation - -``` - -| Field | Type | Description | Default | -| ----- | ---- | ----------- |----------- | -| `annotation` | [[]google.protobuf.GeneratedCodeInfo.Annotation](../descriptor.proto.sk#annotation) | An Annotation connects some span of text in generated code to an element of its generating .proto file. | | - - - - ---- -### Annotation - - - -```yaml -"path": []int -"sourceFile": string -"begin": int -"end": int - -``` - -| Field | Type | Description | Default | -| ----- | ---- | ----------- |----------- | -| `path` | `[]int` | Identifies the element in the original source .proto file. This field is formatted the same as SourceCodeInfo.Location.path. | | -| `sourceFile` | `string` | Identifies the filesystem path to the original source .proto. | | -| `begin` | `int` | Identifies the starting offset in bytes in the generated code that relates to the identified object. | | -| `end` | `int` | Identifies the ending offset in bytes in the generated code that relates to the identified offset. The end offset should be one past the last relevant byte (so the length of the text = end - begin). 
| | - - - - - - - - diff --git a/test/mocks/api/doc/google/protobuf/duration.proto.sk.md b/test/mocks/api/doc/google/protobuf/duration.proto.sk.md deleted file mode 100644 index 774281e52..000000000 --- a/test/mocks/api/doc/google/protobuf/duration.proto.sk.md +++ /dev/null @@ -1,137 +0,0 @@ - ---- -title: "duration.proto" -weight: 5 ---- - - - - -### Package: `google.protobuf` -Protocol Buffers - Google's data interchange format -Copyright 2008 Google Inc. All rights reserved. -https://developers.google.com/protocol-buffers/ - -Redistribution and use in source and binary forms, with or without -modification, are permitted provided that the following conditions are -met: - - * Redistributions of source code must retain the above copyright -notice, this list of conditions and the following disclaimer. - * Redistributions in binary form must reproduce the above -copyright notice, this list of conditions and the following disclaimer -in the documentation and/or other materials provided with the -distribution. - * Neither the name of Google Inc. nor the names of its -contributors may be used to endorse or promote products derived from -this software without specific prior written permission. - -THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS -"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT -LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR -A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT -OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, -SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT -LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, -DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY -THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
- - - -#### Types: - - -- [Duration](#duration) - - - - -##### Source File: `google/protobuf/duration.proto` - - - - - ---- -### Duration - - -A Duration represents a signed, fixed-length span of time represented -as a count of seconds and fractions of seconds at nanosecond -resolution. It is independent of any calendar and concepts like "day" -or "month". It is related to Timestamp in that the difference between -two Timestamp values is a Duration and it can be added or subtracted -from a Timestamp. Range is approximately +-10,000 years. - -# Examples - -Example 1: Compute Duration from two Timestamps in pseudo code. - - Timestamp start = ...; - Timestamp end = ...; - Duration duration = ...; - - duration.seconds = end.seconds - start.seconds; - duration.nanos = end.nanos - start.nanos; - - if (duration.seconds < 0 && duration.nanos > 0) { - duration.seconds += 1; - duration.nanos -= 1000000000; - } else if (durations.seconds > 0 && duration.nanos < 0) { - duration.seconds -= 1; - duration.nanos += 1000000000; - } - -Example 2: Compute Timestamp from Timestamp + Duration in pseudo code. - - Timestamp start = ...; - Duration duration = ...; - Timestamp end = ...; - - end.seconds = start.seconds + duration.seconds; - end.nanos = start.nanos + duration.nanos; - - if (end.nanos < 0) { - end.seconds -= 1; - end.nanos += 1000000000; - } else if (end.nanos >= 1000000000) { - end.seconds += 1; - end.nanos -= 1000000000; - } - -Example 3: Compute Duration from datetime.timedelta in Python. - - td = datetime.timedelta(days=3, minutes=10) - duration = Duration() - duration.FromTimedelta(td) - -# JSON Mapping - -In JSON format, the Duration type is encoded as a string rather than an -object, where the string ends in the suffix "s" (indicating seconds) and -is preceded by the number of seconds, with nanoseconds expressed as -fractional seconds. 
For example, 3 seconds with 0 nanoseconds should be -encoded in JSON format as "3s", while 3 seconds and 1 nanosecond should -be expressed in JSON format as "3.000000001s", and 3 seconds and 1 -microsecond should be expressed in JSON format as "3.000001s". - -```yaml -"seconds": int -"nanos": int - -``` - -| Field | Type | Description | Default | -| ----- | ---- | ----------- |----------- | -| `seconds` | `int` | Signed seconds of the span of time. Must be from -315,576,000,000 to +315,576,000,000 inclusive. Note: these bounds are computed from: 60 sec/min * 60 min/hr * 24 hr/day * 365.25 days/year * 10000 years | | -| `nanos` | `int` | Signed fractions of a second at nanosecond resolution of the span of time. Durations less than one second are represented with a 0 `seconds` field and a positive or negative `nanos` field. For durations of one second or more, a non-zero value for the `nanos` field must be of the same sign as the `seconds` field. Must be from -999,999,999 to +999,999,999 inclusive. | | - - - - - - - - diff --git a/test/mocks/api/doc/google/protobuf/struct.proto.sk.md b/test/mocks/api/doc/google/protobuf/struct.proto.sk.md deleted file mode 100644 index 27237edd3..000000000 --- a/test/mocks/api/doc/google/protobuf/struct.proto.sk.md +++ /dev/null @@ -1,158 +0,0 @@ - ---- -title: "struct.proto" -weight: 5 ---- - - - - -### Package: `google.protobuf` -Protocol Buffers - Google's data interchange format -Copyright 2008 Google Inc. All rights reserved. -https://developers.google.com/protocol-buffers/ - -Redistribution and use in source and binary forms, with or without -modification, are permitted provided that the following conditions are -met: - - * Redistributions of source code must retain the above copyright -notice, this list of conditions and the following disclaimer. 
- * Redistributions in binary form must reproduce the above -copyright notice, this list of conditions and the following disclaimer -in the documentation and/or other materials provided with the -distribution. - * Neither the name of Google Inc. nor the names of its -contributors may be used to endorse or promote products derived from -this software without specific prior written permission. - -THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS -"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT -LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR -A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT -OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, -SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT -LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, -DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY -THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. - - - -#### Types: - - -- [Struct](#struct) -- [Value](#value) -- [ListValue](#listvalue) - - - - -##### Enums: - - - - [NullValue](#nullvalue) - - - -##### Source File: `google/protobuf/struct.proto` - - - - - ---- -### Struct - - -`Struct` represents a structured data value, consisting of fields -which map to dynamically typed values. In some languages, `Struct` -might be supported by a native representation. For example, in -scripting languages like JS a struct is represented as an -object. The details of that representation are described together -with the proto support for the language. - -The JSON representation for `Struct` is JSON object. 
- -```yaml -"fields": map - -``` - -| Field | Type | Description | Default | -| ----- | ---- | ----------- |----------- | -| `fields` | `map` | Unordered map of dynamically typed values. | | - - - - ---- -### Value - - -`Value` represents a dynamically typed value which can be either -null, a number, a string, a boolean, a recursive struct value, or a -list of values. A producer of value is expected to set one of that -variants, absence of any variant indicates an error. - -The JSON representation for `Value` is JSON value. - -```yaml -"nullValue": .google.protobuf.NullValue -"numberValue": float -"stringValue": string -"boolValue": bool -"structValue": .google.protobuf.Struct -"listValue": .google.protobuf.ListValue - -``` - -| Field | Type | Description | Default | -| ----- | ---- | ----------- |----------- | -| `nullValue` | [.google.protobuf.NullValue](https://developers.google.com/protocol-buffers/docs/reference/csharp/class/google/protobuf/well-known-types/null-value) | Represents a null value. | | -| `numberValue` | `float` | Represents a double value. | | -| `stringValue` | `string` | Represents a string value. | | -| `boolValue` | `bool` | Represents a boolean value. | | -| `structValue` | [.google.protobuf.Struct](https://developers.google.com/protocol-buffers/docs/reference/csharp/class/google/protobuf/well-known-types/struct) | Represents a structured value. | | -| `listValue` | [.google.protobuf.ListValue](https://developers.google.com/protocol-buffers/docs/reference/csharp/class/google/protobuf/well-known-types/list-value) | Represents a repeated `Value`. | | - - - - ---- -### ListValue - - -`ListValue` is a wrapper around a repeated field of values. - -The JSON representation for `ListValue` is JSON array. 
- -```yaml -"values": []google.protobuf.Value - -``` - -| Field | Type | Description | Default | -| ----- | ---- | ----------- |----------- | -| `values` | [[]google.protobuf.Value](../struct.proto.sk#value) | Repeated field of dynamically typed values. | | - - - - -### NullValue - -Description: `NullValue` is a singleton enumeration to represent the null value for the -`Value` type union. - - The JSON representation for `NullValue` is JSON `null`. - -| Name | Description | -| ----- | ----------- | -| NULL_VALUE | Null value. | - - - - - diff --git a/test/mocks/api/doc/google/protobuf/timestamp.proto.sk.md b/test/mocks/api/doc/google/protobuf/timestamp.proto.sk.md deleted file mode 100644 index b714bfd37..000000000 --- a/test/mocks/api/doc/google/protobuf/timestamp.proto.sk.md +++ /dev/null @@ -1,155 +0,0 @@ - ---- -title: "timestamp.proto" -weight: 5 ---- - - - - -### Package: `google.protobuf` -Protocol Buffers - Google's data interchange format -Copyright 2008 Google Inc. All rights reserved. -https://developers.google.com/protocol-buffers/ - -Redistribution and use in source and binary forms, with or without -modification, are permitted provided that the following conditions are -met: - - * Redistributions of source code must retain the above copyright -notice, this list of conditions and the following disclaimer. - * Redistributions in binary form must reproduce the above -copyright notice, this list of conditions and the following disclaimer -in the documentation and/or other materials provided with the -distribution. - * Neither the name of Google Inc. nor the names of its -contributors may be used to endorse or promote products derived from -this software without specific prior written permission. - -THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS -"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT -LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR -A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT -OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, -SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT -LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, -DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY -THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. - - - -#### Types: - - -- [Timestamp](#timestamp) - - - - -##### Source File: `google/protobuf/timestamp.proto` - - - - - ---- -### Timestamp - - -A Timestamp represents a point in time independent of any time zone -or calendar, represented as seconds and fractions of seconds at -nanosecond resolution in UTC Epoch time. It is encoded using the -Proleptic Gregorian Calendar which extends the Gregorian calendar -backwards to year one. It is encoded assuming all minutes are 60 -seconds long, i.e. leap seconds are "smeared" so that no leap second -table is needed for interpretation. Range is from -0001-01-01T00:00:00Z to 9999-12-31T23:59:59.999999999Z. -By restricting to that range, we ensure that we can convert to -and from RFC 3339 date strings. -See [https://www.ietf.org/rfc/rfc3339.txt](https://www.ietf.org/rfc/rfc3339.txt). - -# Examples - -Example 1: Compute Timestamp from POSIX `time()`. - - Timestamp timestamp; - timestamp.set_seconds(time(NULL)); - timestamp.set_nanos(0); - -Example 2: Compute Timestamp from POSIX `gettimeofday()`. - - struct timeval tv; - gettimeofday(&tv, NULL); - - Timestamp timestamp; - timestamp.set_seconds(tv.tv_sec); - timestamp.set_nanos(tv.tv_usec * 1000); - -Example 3: Compute Timestamp from Win32 `GetSystemTimeAsFileTime()`. - - FILETIME ft; - GetSystemTimeAsFileTime(&ft); - UINT64 ticks = (((UINT64)ft.dwHighDateTime) << 32) | ft.dwLowDateTime; - - // A Windows tick is 100 nanoseconds. 
Windows epoch 1601-01-01T00:00:00Z - // is 11644473600 seconds before Unix epoch 1970-01-01T00:00:00Z. - Timestamp timestamp; - timestamp.set_seconds((INT64) ((ticks / 10000000) - 11644473600LL)); - timestamp.set_nanos((INT32) ((ticks % 10000000) * 100)); - -Example 4: Compute Timestamp from Java `System.currentTimeMillis()`. - - long millis = System.currentTimeMillis(); - - Timestamp timestamp = Timestamp.newBuilder().setSeconds(millis / 1000) - .setNanos((int) ((millis % 1000) * 1000000)).build(); - - -Example 5: Compute Timestamp from current time in Python. - - timestamp = Timestamp() - timestamp.GetCurrentTime() - -# JSON Mapping - -In JSON format, the Timestamp type is encoded as a string in the -[RFC 3339](https://www.ietf.org/rfc/rfc3339.txt) format. That is, the -format is "{year}-{month}-{day}T{hour}:{min}:{sec}[.{frac_sec}]Z" -where {year} is always expressed using four digits while {month}, {day}, -{hour}, {min}, and {sec} are zero-padded to two digits each. The fractional -seconds, which can go up to 9 digits (i.e. up to 1 nanosecond resolution), -are optional. The "Z" suffix indicates the timezone ("UTC"); the timezone -is required, though only UTC (as indicated by "Z") is presently supported. - -For example, "2017-01-15T01:30:15.01Z" encodes 15.01 seconds past -01:30 UTC on January 15, 2017. - -In JavaScript, one can convert a Date object to this format using the -standard [toISOString()](https://developer.mozilla.org/en-US/docs/Web/JavaScript/Reference/Global_Objects/Date/toISOString] -method. In Python, a standard `datetime.datetime` object can be converted -to this format using [`strftime`](https://docs.python.org/2/library/time.html#time.strftime) -with the time format spec '%Y-%m-%dT%H:%M:%S.%fZ'. Likewise, in Java, one -can use the Joda Time's [`ISODateTimeFormat.dateTime()`]( -http://joda-time.sourceforge.net/apidocs/org/joda/time/format/ISODateTimeFormat.html#dateTime()) -to obtain a formatter capable of generating timestamps in this format. 
- -```yaml -"seconds": int -"nanos": int - -``` - -| Field | Type | Description | Default | -| ----- | ---- | ----------- |----------- | -| `seconds` | `int` | Represents seconds of UTC time since Unix epoch 1970-01-01T00:00:00Z. Must be from 0001-01-01T00:00:00Z to 9999-12-31T23:59:59Z inclusive. | | -| `nanos` | `int` | Non-negative fractions of a second at nanosecond resolution. Negative second values with fractions must still have non-negative nanos values that count forward in time. Must be from 0 to 999,999,999 inclusive. | | - - - - - - - - diff --git a/test/mocks/api/doc/google/protobuf/wrappers.proto.sk.md b/test/mocks/api/doc/google/protobuf/wrappers.proto.sk.md deleted file mode 100644 index 2947f9779..000000000 --- a/test/mocks/api/doc/google/protobuf/wrappers.proto.sk.md +++ /dev/null @@ -1,252 +0,0 @@ - ---- -title: "wrappers.proto" -weight: 5 ---- - - - - -### Package: `google.protobuf` -Protocol Buffers - Google's data interchange format -Copyright 2008 Google Inc. All rights reserved. -https://developers.google.com/protocol-buffers/ - -Redistribution and use in source and binary forms, with or without -modification, are permitted provided that the following conditions are -met: - - * Redistributions of source code must retain the above copyright -notice, this list of conditions and the following disclaimer. - * Redistributions in binary form must reproduce the above -copyright notice, this list of conditions and the following disclaimer -in the documentation and/or other materials provided with the -distribution. - * Neither the name of Google Inc. nor the names of its -contributors may be used to endorse or promote products derived from -this software without specific prior written permission. - -THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS -"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT -LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR -A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT -OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, -SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT -LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, -DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY -THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. -Wrappers for primitive (non-message) types. These types are useful -for embedding primitives in the `google.protobuf.Any` type and for places -where we need to distinguish between the absence of a primitive -typed field and its default value. - - - -#### Types: - - -- [DoubleValue](#doublevalue) -- [FloatValue](#floatvalue) -- [Int64Value](#int64value) -- [UInt64Value](#uint64value) -- [Int32Value](#int32value) -- [UInt32Value](#uint32value) -- [BoolValue](#boolvalue) -- [StringValue](#stringvalue) -- [BytesValue](#bytesvalue) - - - - -##### Source File: `google/protobuf/wrappers.proto` - - - - - ---- -### DoubleValue - - -Wrapper message for `double`. - -The JSON representation for `DoubleValue` is JSON number. - -```yaml -"value": float - -``` - -| Field | Type | Description | Default | -| ----- | ---- | ----------- |----------- | -| `value` | `float` | The double value. | | - - - - ---- -### FloatValue - - -Wrapper message for `float`. - -The JSON representation for `FloatValue` is JSON number. - -```yaml -"value": float - -``` - -| Field | Type | Description | Default | -| ----- | ---- | ----------- |----------- | -| `value` | `float` | The float value. | | - - - - ---- -### Int64Value - - -Wrapper message for `int64`. - -The JSON representation for `Int64Value` is JSON string. - -```yaml -"value": int - -``` - -| Field | Type | Description | Default | -| ----- | ---- | ----------- |----------- | -| `value` | `int` | The int64 value. 
| | - - - - ---- -### UInt64Value - - -Wrapper message for `uint64`. - -The JSON representation for `UInt64Value` is JSON string. - -```yaml -"value": int - -``` - -| Field | Type | Description | Default | -| ----- | ---- | ----------- |----------- | -| `value` | `int` | The uint64 value. | | - - - - ---- -### Int32Value - - -Wrapper message for `int32`. - -The JSON representation for `Int32Value` is JSON number. - -```yaml -"value": int - -``` - -| Field | Type | Description | Default | -| ----- | ---- | ----------- |----------- | -| `value` | `int` | The int32 value. | | - - - - ---- -### UInt32Value - - -Wrapper message for `uint32`. - -The JSON representation for `UInt32Value` is JSON number. - -```yaml -"value": int - -``` - -| Field | Type | Description | Default | -| ----- | ---- | ----------- |----------- | -| `value` | `int` | The uint32 value. | | - - - - ---- -### BoolValue - - -Wrapper message for `bool`. - -The JSON representation for `BoolValue` is JSON `true` and `false`. - -```yaml -"value": bool - -``` - -| Field | Type | Description | Default | -| ----- | ---- | ----------- |----------- | -| `value` | `bool` | The bool value. | | - - - - ---- -### StringValue - - -Wrapper message for `string`. - -The JSON representation for `StringValue` is JSON string. - -```yaml -"value": string - -``` - -| Field | Type | Description | Default | -| ----- | ---- | ----------- |----------- | -| `value` | `string` | The string value. | | - - - - ---- -### BytesValue - - -Wrapper message for `bytes`. - -The JSON representation for `BytesValue` is JSON string. - -```yaml -"value": bytes - -``` - -| Field | Type | Description | Default | -| ----- | ---- | ----------- |----------- | -| `value` | `bytes` | The bytes value. 
| | - - - - - - - - diff --git a/test/mocks/api/doc/google/rpc/status.proto.sk.md b/test/mocks/api/doc/google/rpc/status.proto.sk.md deleted file mode 100644 index 54fa7c418..000000000 --- a/test/mocks/api/doc/google/rpc/status.proto.sk.md +++ /dev/null @@ -1,117 +0,0 @@ - ---- -title: "status.proto" -weight: 5 ---- - - - - -### Package: `google.rpc` -Copyright 2017 Google Inc. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. - - - -#### Types: - - -- [Status](#status) - - - - -##### Source File: `google/rpc/status.proto` - - - - - ---- -### Status - - -The `Status` type defines a logical error model that is suitable for different -programming environments, including REST APIs and RPC APIs. It is used by -[gRPC](https://github.com/grpc). The error model is designed to be: - -- Simple to use and understand for most users -- Flexible enough to meet unexpected needs - -# Overview - -The `Status` message contains three pieces of data: error code, error message, -and error details. The error code should be an enum value of -[google.rpc.Code][google.rpc.Code], but it may accept additional error codes if needed. The -error message should be a developer-facing English message that helps -developers *understand* and *resolve* the error. If a localized user-facing -error message is needed, put the localized message in the error details or -localize it in the client. The optional error details may contain arbitrary -information about the error. 
There is a predefined set of error detail types -in the package `google.rpc` that can be used for common error conditions. - -# Language mapping - -The `Status` message is the logical representation of the error model, but it -is not necessarily the actual wire format. When the `Status` message is -exposed in different client libraries and different wire protocols, it can be -mapped differently. For example, it will likely be mapped to some exceptions -in Java, but more likely mapped to some error codes in C. - -# Other uses - -The error model and the `Status` message can be used in a variety of -environments, either with or without APIs, to provide a -consistent developer experience across different environments. - -Example uses of this error model include: - -- Partial errors. If a service needs to return partial errors to the client, - it may embed the `Status` in the normal response to indicate the partial - errors. - -- Workflow errors. A typical workflow has multiple steps. Each step may - have a `Status` message for error reporting. - -- Batch operations. If a client uses batch request and batch response, the - `Status` message should be used directly inside batch response, one for - each error sub-response. - -- Asynchronous operations. If an API call embeds asynchronous operation - results in its response, the status of those operations should be - represented directly using the `Status` message. - -- Logging. If some API errors are stored in logs, the message `Status` could - be used directly after any stripping needed for security/privacy reasons. - -```yaml -"code": int -"message": string -"details": []google.protobuf.Any - -``` - -| Field | Type | Description | Default | -| ----- | ---- | ----------- |----------- | -| `code` | `int` | The status code, which should be an enum value of [google.rpc.Code][google.rpc.Code]. | | -| `message` | `string` | A developer-facing error message, which should be in English. 
Any user-facing error message should be localized and sent in the [google.rpc.Status.details][google.rpc.Status.details] field, or localized by the client. | | -| `details` | [[]google.protobuf.Any](../../protobuf/any.proto.sk#any) | A list of messages that carry the error details. There is a common set of message types for APIs to use. | | - - - - - - - - diff --git a/test/mocks/api/doc/testing.solo.io.project.sk.md b/test/mocks/api/doc/testing.solo.io.project.sk.md deleted file mode 100644 index cfc780b2f..000000000 --- a/test/mocks/api/doc/testing.solo.io.project.sk.md +++ /dev/null @@ -1,25 +0,0 @@ - ---- -title: "testing.solo.io.project" -weight: 5 ---- - - - - - -### API Reference for Solo-Kit Testing - -API Version: `testing.solo.io.v1` - - - -### API Resources: -- [AnotherMockResource](../github.com/solo-io/solo-kit/test/mocks/api/v1/more_mock_resources.proto.sk#anothermockresource) -- [ClusterResource](../github.com/solo-io/solo-kit/test/mocks/api/v1/more_mock_resources.proto.sk#clusterresource) -- [FakeResource](../github.com/solo-io/solo-kit/test/mocks/api/v1/mock_resources.proto.sk#fakeresource) -- [MockResource](../github.com/solo-io/solo-kit/test/mocks/api/v1/mock_resources.proto.sk#mockresource) - - - - diff --git a/test/mocks/api/doc/envoy/api/v2/core/base.proto.sk.md b/test/mocks/docs/envoy/api/v2/core/base.proto.sk.md similarity index 85% rename from test/mocks/api/doc/envoy/api/v2/core/base.proto.sk.md rename to test/mocks/docs/envoy/api/v2/core/base.proto.sk.md index 36f29783c..518b83d42 100644 --- a/test/mocks/api/doc/envoy/api/v2/core/base.proto.sk.md +++ b/test/mocks/docs/envoy/api/v2/core/base.proto.sk.md @@ -1,29 +1,22 @@ - ---- -title: "base.proto" -weight: 5 ---- - - ### Package: `envoy.api.v2.core` #### Types: -- [Locality](#locality) -- [Node](#node) -- [Metadata](#metadata) -- [RuntimeUInt32](#runtimeuint32) -- [HeaderValue](#headervalue) -- [HeaderValueOption](#headervalueoption) -- [HeaderMap](#headermap) -- [DataSource](#datasource) 
-- [TransportSocket](#transportsocket) -- [SocketOption](#socketoption) -- [SocketState](#socketstate) -- [RuntimeFractionalPercent](#runtimefractionalpercent) -- [ControlPlane](#controlplane) +- [Locality](#Locality) +- [Node](#Node) +- [Metadata](#Metadata) +- [RuntimeUInt32](#RuntimeUInt32) +- [HeaderValue](#HeaderValue) +- [HeaderValueOption](#HeaderValueOption) +- [HeaderMap](#HeaderMap) +- [DataSource](#DataSource) +- [TransportSocket](#TransportSocket) +- [SocketOption](#SocketOption) +- [SocketState](#SocketState) +- [RuntimeFractionalPercent](#RuntimeFractionalPercent) +- [ControlPlane](#ControlPlane) @@ -31,8 +24,8 @@ weight: 5 ##### Enums: - - [RoutingPriority](#routingpriority) - - [RequestMethod](#requestmethod) + - [RoutingPriority](#RoutingPriority) + - [RequestMethod](#RequestMethod) @@ -43,7 +36,7 @@ weight: 5 --- -### Locality +### Locality Identifies location of where either Envoy runs or where upstream hosts run. @@ -65,7 +58,7 @@ Identifies location of where either Envoy runs or where upstream hosts run. --- -### Node +### Node Identifies a specific Envoy instance. The node identifier is presented to the @@ -86,14 +79,14 @@ configuration for serving. | `id` | `string` | An opaque node identifier for the Envoy node. This also provides the local service node name. It should be set if any of the following features are used: :ref:`statsd `, :ref:`CDS `, and :ref:`HTTP tracing `, either in this message or via :option:`--service-node`. | | | `cluster` | `string` | Defines the local service cluster name where Envoy is running. Though optional, it should be set if any of the following features are used: :ref:`statsd `, :ref:`health check cluster verification `, :ref:`runtime override directory `, :ref:`user agent addition `, :ref:`HTTP global rate limiting `, :ref:`CDS `, and :ref:`HTTP tracing `, either in this message or via :option:`--service-cluster`. 
| | | `metadata` | [.google.protobuf.Struct](https://developers.google.com/protocol-buffers/docs/reference/csharp/class/google/protobuf/well-known-types/struct) | Opaque metadata extending the node identifier. Envoy will pass this directly to the management server. | | -| `locality` | [.envoy.api.v2.core.Locality](../base.proto.sk#locality) | Locality specifying where the Envoy instance is running. | | +| `locality` | [.envoy.api.v2.core.Locality](base.proto.sk.md#Locality) | Locality specifying where the Envoy instance is running. | | | `buildVersion` | `string` | This is motivated by informing a management server during canary which version of Envoy is being tested in a heterogeneous fleet. This will be set by Envoy in management server RPCs. | | --- -### Metadata +### Metadata Metadata provides additional inputs to filters based on matched listeners, @@ -131,7 +124,7 @@ this purpose: --- -### RuntimeUInt32 +### RuntimeUInt32 Runtime derived uint32 with a default when not specified. @@ -151,7 +144,7 @@ Runtime derived uint32 with a default when not specified. --- -### HeaderValue +### HeaderValue Header name/value pair. @@ -171,7 +164,7 @@ Header name/value pair. --- -### HeaderValueOption +### HeaderValueOption Header name/value pair plus option to control append behavior. @@ -184,14 +177,14 @@ Header name/value pair plus option to control append behavior. | Field | Type | Description | Default | | ----- | ---- | ----------- |----------- | -| `header` | [.envoy.api.v2.core.HeaderValue](../base.proto.sk#headervalue) | Header name/value pair that this option applies to. | | +| `header` | [.envoy.api.v2.core.HeaderValue](base.proto.sk.md#HeaderValue) | Header name/value pair that this option applies to. | | | `append` | [.google.protobuf.BoolValue](https://developers.google.com/protocol-buffers/docs/reference/csharp/class/google/protobuf/well-known-types/bool-value) | Should the value be appended? If true (default), the value is appended to existing values. 
| | --- -### HeaderMap +### HeaderMap Wrapper for a set of headers. @@ -203,13 +196,13 @@ Wrapper for a set of headers. | Field | Type | Description | Default | | ----- | ---- | ----------- |----------- | -| `headers` | [[]envoy.api.v2.core.HeaderValue](../base.proto.sk#headervalue) | | | +| `headers` | [[]envoy.api.v2.core.HeaderValue](base.proto.sk.md#HeaderValue) | | | --- -### DataSource +### DataSource Data source consisting of either a file or an inline value. @@ -231,7 +224,7 @@ Data source consisting of either a file or an inline value. --- -### TransportSocket +### TransportSocket Configuration for transport socket in :ref:`listeners ` and @@ -256,7 +249,7 @@ chosen based on the platform and existence of tls_context. --- -### SocketOption +### SocketOption Generic socket option message. This would be used to set socket options that @@ -279,13 +272,13 @@ might not exist in upstream kernels or precompiled Envoy binaries. | `name` | `int` | The numeric name as passed to setsockopt | | | `intValue` | `int` | Because many sockopts take an int value. | | | `bufValue` | `bytes` | Otherwise it's a byte buffer. | | -| `state` | [.envoy.api.v2.core.SocketOption.SocketState](../base.proto.sk#socketstate) | The state in which the option will be applied. When used in BindConfig STATE_PREBIND is currently the only valid value. | | +| `state` | [.envoy.api.v2.core.SocketOption.SocketState](base.proto.sk.md#SocketState) | The state in which the option will be applied. When used in BindConfig STATE_PREBIND is currently the only valid value. | | --- -### SocketState +### SocketState @@ -299,7 +292,7 @@ might not exist in upstream kernels or precompiled Envoy binaries. --- -### RuntimeFractionalPercent +### RuntimeFractionalPercent Runtime derived FractionalPercent with defaults for when the numerator or denominator is not @@ -313,14 +306,14 @@ specified via a runtime key. 
| Field | Type | Description | Default | | ----- | ---- | ----------- |----------- | -| `defaultValue` | [.envoy.type.FractionalPercent](../../../../type/percent.proto.sk#fractionalpercent) | Default value if the runtime value's for the numerator/denominator keys are not available. | | +| `defaultValue` | [.envoy.type.FractionalPercent](../../../../github.com/solo-io/solo-kit/api/external/envoy/type/percent.proto.sk.md#FractionalPercent) | Default value if the runtime value's for the numerator/denominator keys are not available. | | | `runtimeKey` | `string` | Runtime key for a YAML representation of a FractionalPercent. | | --- -### ControlPlane +### ControlPlane Identifies a specific ControlPlane instance that Envoy is connected to. @@ -337,7 +330,7 @@ Identifies a specific ControlPlane instance that Envoy is connected to. -### RoutingPriority +### RoutingPriority Description: Envoy supports :ref:`upstream priority routing ` both at the route and the virtual @@ -352,7 +345,7 @@ over a single upstream connection. | DEFAULT | | | HIGH | | -### RequestMethod +### RequestMethod Description: HTTP request method. diff --git a/test/mocks/docs/github.com/solo-io/solo-kit/api/external/envoy/api/v2/discovery.proto.sk.md b/test/mocks/docs/github.com/solo-io/solo-kit/api/external/envoy/api/v2/discovery.proto.sk.md index e57357ab6..6591be92e 100644 --- a/test/mocks/docs/github.com/solo-io/solo-kit/api/external/envoy/api/v2/discovery.proto.sk.md +++ b/test/mocks/docs/github.com/solo-io/solo-kit/api/external/envoy/api/v2/discovery.proto.sk.md @@ -66,7 +66,7 @@ a given Envoy node on some API. | Field | Type | Description | Default | | ----- | ---- | ----------- |----------- | | `versionInfo` | `string` | The version of the response data. | | -| `resources` | [[]google.protobuf.Any](../../../google/protobuf/any.proto.sk.md#Any) | The response resources. These resources are typed and depend on the API being called. 
| | +| `resources` | [[]google.protobuf.Any](../../../../../../../../google/protobuf/any.proto.sk.md#Any) | The response resources. These resources are typed and depend on the API being called. | | | `canary` | `bool` | [#not-implemented-hide:] Canary is used to support two Envoy command line flags: * --terminate-on-canary-transition-failure. When set, Envoy is able to terminate if it detects that configuration is stuck at canary. Consider this example sequence of updates: - Management server applies a canary config successfully. - Management server rolls back to a production config. - Envoy rejects the new production config. Since there is no sensible way to continue receiving configuration updates, Envoy will then terminate and apply production config from a clean slate. * --dry-run-canary. When set, a canary response will never be applied, only validated via a dry run. | | | `typeUrl` | `string` | Type URL for resources. This must be consistent with the type_url in the Any messages for resources if resources is non-empty. This effectively identifies the xDS API when muxing over ADS. | | | `nonce` | `string` | For gRPC based subscriptions, the nonce provides a way to explicitly ack a specific DiscoveryResponse in a following DiscoveryRequest. Additional messages may have been sent by Envoy to the management server for the previous version on the stream prior to this DiscoveryResponse, that were unprocessed at response send time. The nonce allows the management server to ignore any further DiscoveryRequests for the previous version until a DiscoveryRequest bearing the nonce. The nonce is optional and is not required for non-stream based xDS implementations. 
| | diff --git a/test/mocks/docs/github.com/solo-io/solo-kit/api/external/google/protobuf/any.proto.sk.md b/test/mocks/docs/github.com/solo-io/solo-kit/api/external/google/protobuf/any.proto.sk.md deleted file mode 100644 index 4b615defd..000000000 --- a/test/mocks/docs/github.com/solo-io/solo-kit/api/external/google/protobuf/any.proto.sk.md +++ /dev/null @@ -1,141 +0,0 @@ - - -### Package: `google.protobuf` -Protocol Buffers - Google's data interchange format -Copyright 2008 Google Inc. All rights reserved. -https://developers.google.com/protocol-buffers/ - -Redistribution and use in source and binary forms, with or without -modification, are permitted provided that the following conditions are -met: - - * Redistributions of source code must retain the above copyright -notice, this list of conditions and the following disclaimer. - * Redistributions in binary form must reproduce the above -copyright notice, this list of conditions and the following disclaimer -in the documentation and/or other materials provided with the -distribution. - * Neither the name of Google Inc. nor the names of its -contributors may be used to endorse or promote products derived from -this software without specific prior written permission. - -THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS -"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT -LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR -A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT -OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, -SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT -LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, -DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY -THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. - - - -#### Types: - - -- [Any](#Any) - - - - -##### Source File: [github.com/solo-io/solo-kit/api/external/google/protobuf/any.proto](https://github.com/solo-io/solo-kit/blob/master/api/external/google/protobuf/any.proto) - - - - - ---- -### Any - - -`Any` contains an arbitrary serialized protocol buffer message along with a -URL that describes the type of the serialized message. - -Protobuf library provides support to pack/unpack Any values in the form -of utility functions or additional generated methods of the Any type. - -Example 1: Pack and unpack a message in C++. - - Foo foo = ...; - Any any; - any.PackFrom(foo); - ... - if (any.UnpackTo(&foo)) { - ... - } - -Example 2: Pack and unpack a message in Java. - - Foo foo = ...; - Any any = Any.pack(foo); - ... - if (any.is(Foo.class)) { - foo = any.unpack(Foo.class); - } - - Example 3: Pack and unpack a message in Python. - - foo = Foo(...) - any = Any() - any.Pack(foo) - ... - if any.Is(Foo.DESCRIPTOR): - any.Unpack(foo) - ... - -The pack methods provided by protobuf library will by default use -'type.googleapis.com/full.type.name' as the type URL and the unpack -methods only use the fully qualified type name after the last '/' -in the type URL, for example "foo.bar.com/x/y.z" will yield type -name "y.z". - - -JSON -==== -The JSON representation of an `Any` value uses the regular -representation of the deserialized, embedded message, with an -additional field `@type` which contains the type URL. 
Example: - - package google.profile; - message Person { - string first_name = 1; - string last_name = 2; - } - - { - "@type": "type.googleapis.com/google.profile.Person", - "firstName": , - "lastName": - } - -If the embedded message type is well-known and has a custom JSON -representation, that representation will be embedded adding a field -`value` which holds the custom JSON in addition to the `@type` -field. Example (for message [google.protobuf.Duration][]): - - { - "@type": "type.googleapis.com/google.protobuf.Duration", - "value": "1.212s" - } - -```yaml -"typeUrl": string -"value": bytes - -``` - -| Field | Type | Description | Default | -| ----- | ---- | ----------- |----------- | -| `typeUrl` | `string` | A URL/resource name whose content describes the type of the serialized protocol buffer message. For URLs which use the scheme `http`, `https`, or no scheme, the following restrictions and interpretations apply: * If no scheme is provided, `https` is assumed. * The last segment of the URL's path must represent the fully qualified name of the type (as in `path/google.protobuf.Duration`). The name should be in a canonical form (e.g., leading "." is not accepted). * An HTTP GET on the URL must yield a [google.protobuf.Type][] value in binary format, or produce an error. * Applications are allowed to cache lookup results based on the URL, or have them precompiled into a binary to avoid any lookup. Therefore, binary compatibility needs to be preserved on changes to types. (Use versioned type names to manage breaking changes.) Schemes other than `http`, `https` (or the empty scheme) might be used with implementation specific semantics. | | -| `value` | `bytes` | Must be a valid serialized protocol buffer of the above specified type. 
| | - - - - - - - - diff --git a/test/mocks/docs/google/protobuf/descriptor.proto.sk.md b/test/mocks/docs/github.com/solo-io/solo-kit/api/external/google/protobuf/descriptor.proto.sk.md similarity index 99% rename from test/mocks/docs/google/protobuf/descriptor.proto.sk.md rename to test/mocks/docs/github.com/solo-io/solo-kit/api/external/google/protobuf/descriptor.proto.sk.md index 410c59cce..a4f7351ac 100644 --- a/test/mocks/docs/google/protobuf/descriptor.proto.sk.md +++ b/test/mocks/docs/github.com/solo-io/solo-kit/api/external/google/protobuf/descriptor.proto.sk.md @@ -78,7 +78,7 @@ without any other information (e.g. without reading its imports). -##### Source File: `google/protobuf/descriptor.proto` +##### Source File: [github.com/solo-io/solo-kit/api/external/google/protobuf/descriptor.proto](https://github.com/solo-io/solo-kit/blob/master/api/external/google/protobuf/descriptor.proto) diff --git a/test/mocks/docs/github.com/solo-io/solo-kit/api/external/google/rpc/status.proto.sk.md b/test/mocks/docs/github.com/solo-io/solo-kit/api/external/google/rpc/status.proto.sk.md index 4e59958a4..c8e82bd53 100644 --- a/test/mocks/docs/github.com/solo-io/solo-kit/api/external/google/rpc/status.proto.sk.md +++ b/test/mocks/docs/github.com/solo-io/solo-kit/api/external/google/rpc/status.proto.sk.md @@ -99,7 +99,7 @@ Example uses of this error model include: | ----- | ---- | ----------- |----------- | | `code` | `int` | The status code, which should be an enum value of [google.rpc.Code][google.rpc.Code]. | | | `message` | `string` | A developer-facing error message, which should be in English. Any user-facing error message should be localized and sent in the [google.rpc.Status.details][google.rpc.Status.details] field, or localized by the client. | | -| `details` | [[]google.protobuf.Any](../protobuf/any.proto.sk.md#Any) | A list of messages that carry the error details. There is a common set of message types for APIs to use. 
| | +| `details` | [[]google.protobuf.Any](../../../../../../../google/protobuf/any.proto.sk.md#Any) | A list of messages that carry the error details. There is a common set of message types for APIs to use. | | diff --git a/test/mocks/docs/github.com/solo-io/solo-kit/pkg/api/v1/apiserver/api_server.proto.sk.md b/test/mocks/docs/github.com/solo-io/solo-kit/pkg/api/v1/apiserver/api_server.proto.sk.md index 06577862d..7cc8e2654 100644 --- a/test/mocks/docs/github.com/solo-io/solo-kit/pkg/api/v1/apiserver/api_server.proto.sk.md +++ b/test/mocks/docs/github.com/solo-io/solo-kit/pkg/api/v1/apiserver/api_server.proto.sk.md @@ -170,7 +170,7 @@ GRPC stuff | Field | Type | Description | Default | | ----- | ---- | ----------- |----------- | -| `resourceList` | [[]google.protobuf.Any](../../../../api/external/google/protobuf/any.proto.sk.md#Any) | | | +| `resourceList` | [[]google.protobuf.Any](../../../../../../../google/protobuf/any.proto.sk.md#Any) | | | @@ -206,7 +206,7 @@ GRPC stuff | Field | Type | Description | Default | | ----- | ---- | ----------- |----------- | -| `resourceList` | [[]google.protobuf.Any](../../../../api/external/google/protobuf/any.proto.sk.md#Any) | | | +| `resourceList` | [[]google.protobuf.Any](../../../../../../../google/protobuf/any.proto.sk.md#Any) | | | diff --git a/test/mocks/api/doc/google/protobuf/any.proto.sk.md b/test/mocks/docs/google/protobuf/any.proto.sk.md similarity index 98% rename from test/mocks/api/doc/google/protobuf/any.proto.sk.md rename to test/mocks/docs/google/protobuf/any.proto.sk.md index 58d95c111..03c64f4fb 100644 --- a/test/mocks/api/doc/google/protobuf/any.proto.sk.md +++ b/test/mocks/docs/google/protobuf/any.proto.sk.md @@ -1,12 +1,5 @@ - ---- -title: "any.proto" -weight: 5 ---- - - ### Package: `google.protobuf` Protocol Buffers - Google's data interchange format Copyright 2008 Google Inc. All rights reserved. @@ -43,7 +36,7 @@ OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
#### Types: -- [Any](#any) +- [Any](#Any) @@ -55,7 +48,7 @@ OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. --- -### Any +### Any `Any` contains an arbitrary serialized protocol buffer message along with a diff --git a/test/mocks/v1alpha1/testing_snapshot_emitter.sk.go b/test/mocks/v1alpha1/testing_snapshot_emitter.sk.go deleted file mode 100644 index 67ac556a2..000000000 --- a/test/mocks/v1alpha1/testing_snapshot_emitter.sk.go +++ /dev/null @@ -1,204 +0,0 @@ -// Code generated by solo-kit. DO NOT EDIT. - -package v1alpha1 - -import ( - "sync" - "time" - - "go.opencensus.io/stats" - "go.opencensus.io/stats/view" - "go.opencensus.io/tag" - - "github.com/solo-io/go-utils/errutils" - "github.com/solo-io/solo-kit/pkg/api/v1/clients" - "github.com/solo-io/solo-kit/pkg/errors" -) - -var ( - mTestingSnapshotIn = stats.Int64("testing.solo.io/snap_emitter/snap_in", "The number of snapshots in", "1") - mTestingSnapshotOut = stats.Int64("testing.solo.io/snap_emitter/snap_out", "The number of snapshots out", "1") - mTestingSnapshotMissed = stats.Int64("testing.solo.io/snap_emitter/snap_missed", "The number of snapshots missed", "1") - - testingsnapshotInView = &view.View{ - Name: "testing.solo.io_snap_emitter/snap_in", - Measure: mTestingSnapshotIn, - Description: "The number of snapshots updates coming in", - Aggregation: view.Count(), - TagKeys: []tag.Key{}, - } - testingsnapshotOutView = &view.View{ - Name: "testing.solo.io/snap_emitter/snap_out", - Measure: mTestingSnapshotOut, - Description: "The number of snapshots updates going out", - Aggregation: view.Count(), - TagKeys: []tag.Key{}, - } - testingsnapshotMissedView = &view.View{ - Name: "testing.solo.io/snap_emitter/snap_missed", - Measure: mTestingSnapshotMissed, - Description: "The number of snapshots updates going missed. this can happen in heavy load. 
missed snapshot will be re-tried after a second.", - Aggregation: view.Count(), - TagKeys: []tag.Key{}, - } -) - -func init() { - view.Register(testingsnapshotInView, testingsnapshotOutView, testingsnapshotMissedView) -} - -type TestingEmitter interface { - Register() error - MockResource() MockResourceClient - Snapshots(watchNamespaces []string, opts clients.WatchOpts) (<-chan *TestingSnapshot, <-chan error, error) -} - -func NewTestingEmitter(mockResourceClient MockResourceClient) TestingEmitter { - return NewTestingEmitterWithEmit(mockResourceClient, make(chan struct{})) -} - -func NewTestingEmitterWithEmit(mockResourceClient MockResourceClient, emit <-chan struct{}) TestingEmitter { - return &testingEmitter{ - mockResource: mockResourceClient, - forceEmit: emit, - } -} - -type testingEmitter struct { - forceEmit <-chan struct{} - mockResource MockResourceClient -} - -func (c *testingEmitter) Register() error { - if err := c.mockResource.Register(); err != nil { - return err - } - return nil -} - -func (c *testingEmitter) MockResource() MockResourceClient { - return c.mockResource -} - -func (c *testingEmitter) Snapshots(watchNamespaces []string, opts clients.WatchOpts) (<-chan *TestingSnapshot, <-chan error, error) { - - if len(watchNamespaces) == 0 { - watchNamespaces = []string{""} - } - - for _, ns := range watchNamespaces { - if ns == "" && len(watchNamespaces) > 1 { - return nil, nil, errors.Errorf("the \"\" namespace is used to watch all namespaces. 
Snapshots can either be tracked for " + - "specific namespaces or \"\" AllNamespaces, but not both.") - } - } - - errs := make(chan error) - var done sync.WaitGroup - ctx := opts.Ctx - /* Create channel for MockResource */ - type mockResourceListWithNamespace struct { - list MockResourceList - namespace string - } - mockResourceChan := make(chan mockResourceListWithNamespace) - - var initialMockResourceList MockResourceList - - currentSnapshot := TestingSnapshot{} - - for _, namespace := range watchNamespaces { - /* Setup namespaced watch for MockResource */ - { - mocks, err := c.mockResource.List(namespace, clients.ListOpts{Ctx: opts.Ctx, Selector: opts.Selector}) - if err != nil { - return nil, nil, errors.Wrapf(err, "initial MockResource list") - } - initialMockResourceList = append(initialMockResourceList, mocks...) - } - mockResourceNamespacesChan, mockResourceErrs, err := c.mockResource.Watch(namespace, opts) - if err != nil { - return nil, nil, errors.Wrapf(err, "starting MockResource watch") - } - - done.Add(1) - go func(namespace string) { - defer done.Done() - errutils.AggregateErrs(ctx, errs, mockResourceErrs, namespace+"-mocks") - }(namespace) - - /* Watch for changes and update snapshot */ - go func(namespace string) { - for { - select { - case <-ctx.Done(): - return - case mockResourceList := <-mockResourceNamespacesChan: - select { - case <-ctx.Done(): - return - case mockResourceChan <- mockResourceListWithNamespace{list: mockResourceList, namespace: namespace}: - } - } - } - }(namespace) - } - /* Initialize snapshot for Mocks */ - currentSnapshot.Mocks = initialMockResourceList.Sort() - - snapshots := make(chan *TestingSnapshot) - go func() { - // sent initial snapshot to kick off the watch - initialSnapshot := currentSnapshot.Clone() - snapshots <- &initialSnapshot - - originalSnapshot := TestingSnapshot{} - timer := time.NewTicker(time.Second * 1) - - sync := func() { - if originalSnapshot.Hash() == currentSnapshot.Hash() { - return - } - - 
sentSnapshot := currentSnapshot.Clone() - select { - case snapshots <- &sentSnapshot: - stats.Record(ctx, mTestingSnapshotOut.M(1)) - originalSnapshot = currentSnapshot.Clone() - default: - stats.Record(ctx, mTestingSnapshotMissed.M(1)) - } - } - mocksByNamespace := make(map[string]MockResourceList) - - for { - record := func() { stats.Record(ctx, mTestingSnapshotIn.M(1)) } - - select { - case <-timer.C: - sync() - case <-ctx.Done(): - close(snapshots) - done.Wait() - close(errs) - return - case <-c.forceEmit: - sentSnapshot := currentSnapshot.Clone() - snapshots <- &sentSnapshot - case mockResourceNamespacedList := <-mockResourceChan: - record() - - namespace := mockResourceNamespacedList.namespace - - // merge lists by namespace - mocksByNamespace[namespace] = mockResourceNamespacedList.list - var mockResourceList MockResourceList - for _, mocks := range mocksByNamespace { - mockResourceList = append(mockResourceList, mocks...) - } - currentSnapshot.Mocks = mockResourceList.Sort() - } - } - }() - return snapshots, errs, nil -} diff --git a/test/mocks/v2alpha1/testing_snapshot_emitter.sk.go b/test/mocks/v2alpha1/testing_snapshot_emitter.sk.go deleted file mode 100644 index 9c64b7157..000000000 --- a/test/mocks/v2alpha1/testing_snapshot_emitter.sk.go +++ /dev/null @@ -1,263 +0,0 @@ -// Code generated by solo-kit. DO NOT EDIT. 
- -package v2alpha1 - -import ( - "sync" - "time" - - testing_solo_io "github.com/solo-io/solo-kit/test/mocks/v1" - - "go.opencensus.io/stats" - "go.opencensus.io/stats/view" - "go.opencensus.io/tag" - - "github.com/solo-io/go-utils/errutils" - "github.com/solo-io/solo-kit/pkg/api/v1/clients" - "github.com/solo-io/solo-kit/pkg/errors" -) - -var ( - mTestingSnapshotIn = stats.Int64("testing.solo.io/snap_emitter/snap_in", "The number of snapshots in", "1") - mTestingSnapshotOut = stats.Int64("testing.solo.io/snap_emitter/snap_out", "The number of snapshots out", "1") - mTestingSnapshotMissed = stats.Int64("testing.solo.io/snap_emitter/snap_missed", "The number of snapshots missed", "1") - - testingsnapshotInView = &view.View{ - Name: "testing.solo.io_snap_emitter/snap_in", - Measure: mTestingSnapshotIn, - Description: "The number of snapshots updates coming in", - Aggregation: view.Count(), - TagKeys: []tag.Key{}, - } - testingsnapshotOutView = &view.View{ - Name: "testing.solo.io/snap_emitter/snap_out", - Measure: mTestingSnapshotOut, - Description: "The number of snapshots updates going out", - Aggregation: view.Count(), - TagKeys: []tag.Key{}, - } - testingsnapshotMissedView = &view.View{ - Name: "testing.solo.io/snap_emitter/snap_missed", - Measure: mTestingSnapshotMissed, - Description: "The number of snapshots updates going missed. this can happen in heavy load. 
missed snapshot will be re-tried after a second.", - Aggregation: view.Count(), - TagKeys: []tag.Key{}, - } -) - -func init() { - view.Register(testingsnapshotInView, testingsnapshotOutView, testingsnapshotMissedView) -} - -type TestingEmitter interface { - Register() error - MockResource() MockResourceClient - FakeResource() testing_solo_io.FakeResourceClient - Snapshots(watchNamespaces []string, opts clients.WatchOpts) (<-chan *TestingSnapshot, <-chan error, error) -} - -func NewTestingEmitter(mockResourceClient MockResourceClient, fakeResourceClient testing_solo_io.FakeResourceClient) TestingEmitter { - return NewTestingEmitterWithEmit(mockResourceClient, fakeResourceClient, make(chan struct{})) -} - -func NewTestingEmitterWithEmit(mockResourceClient MockResourceClient, fakeResourceClient testing_solo_io.FakeResourceClient, emit <-chan struct{}) TestingEmitter { - return &testingEmitter{ - mockResource: mockResourceClient, - fakeResource: fakeResourceClient, - forceEmit: emit, - } -} - -type testingEmitter struct { - forceEmit <-chan struct{} - mockResource MockResourceClient - fakeResource testing_solo_io.FakeResourceClient -} - -func (c *testingEmitter) Register() error { - if err := c.mockResource.Register(); err != nil { - return err - } - if err := c.fakeResource.Register(); err != nil { - return err - } - return nil -} - -func (c *testingEmitter) MockResource() MockResourceClient { - return c.mockResource -} - -func (c *testingEmitter) FakeResource() testing_solo_io.FakeResourceClient { - return c.fakeResource -} - -func (c *testingEmitter) Snapshots(watchNamespaces []string, opts clients.WatchOpts) (<-chan *TestingSnapshot, <-chan error, error) { - - if len(watchNamespaces) == 0 { - watchNamespaces = []string{""} - } - - for _, ns := range watchNamespaces { - if ns == "" && len(watchNamespaces) > 1 { - return nil, nil, errors.Errorf("the \"\" namespace is used to watch all namespaces. 
Snapshots can either be tracked for " + - "specific namespaces or \"\" AllNamespaces, but not both.") - } - } - - errs := make(chan error) - var done sync.WaitGroup - ctx := opts.Ctx - /* Create channel for MockResource */ - type mockResourceListWithNamespace struct { - list MockResourceList - namespace string - } - mockResourceChan := make(chan mockResourceListWithNamespace) - - var initialMockResourceList MockResourceList - /* Create channel for FakeResource */ - type fakeResourceListWithNamespace struct { - list testing_solo_io.FakeResourceList - namespace string - } - fakeResourceChan := make(chan fakeResourceListWithNamespace) - - var initialFakeResourceList testing_solo_io.FakeResourceList - - currentSnapshot := TestingSnapshot{} - - for _, namespace := range watchNamespaces { - /* Setup namespaced watch for MockResource */ - { - mocks, err := c.mockResource.List(namespace, clients.ListOpts{Ctx: opts.Ctx, Selector: opts.Selector}) - if err != nil { - return nil, nil, errors.Wrapf(err, "initial MockResource list") - } - initialMockResourceList = append(initialMockResourceList, mocks...) - } - mockResourceNamespacesChan, mockResourceErrs, err := c.mockResource.Watch(namespace, opts) - if err != nil { - return nil, nil, errors.Wrapf(err, "starting MockResource watch") - } - - done.Add(1) - go func(namespace string) { - defer done.Done() - errutils.AggregateErrs(ctx, errs, mockResourceErrs, namespace+"-mocks") - }(namespace) - /* Setup namespaced watch for FakeResource */ - { - fakes, err := c.fakeResource.List(namespace, clients.ListOpts{Ctx: opts.Ctx, Selector: opts.Selector}) - if err != nil { - return nil, nil, errors.Wrapf(err, "initial FakeResource list") - } - initialFakeResourceList = append(initialFakeResourceList, fakes...) 
- } - fakeResourceNamespacesChan, fakeResourceErrs, err := c.fakeResource.Watch(namespace, opts) - if err != nil { - return nil, nil, errors.Wrapf(err, "starting FakeResource watch") - } - - done.Add(1) - go func(namespace string) { - defer done.Done() - errutils.AggregateErrs(ctx, errs, fakeResourceErrs, namespace+"-fakes") - }(namespace) - - /* Watch for changes and update snapshot */ - go func(namespace string) { - for { - select { - case <-ctx.Done(): - return - case mockResourceList := <-mockResourceNamespacesChan: - select { - case <-ctx.Done(): - return - case mockResourceChan <- mockResourceListWithNamespace{list: mockResourceList, namespace: namespace}: - } - case fakeResourceList := <-fakeResourceNamespacesChan: - select { - case <-ctx.Done(): - return - case fakeResourceChan <- fakeResourceListWithNamespace{list: fakeResourceList, namespace: namespace}: - } - } - } - }(namespace) - } - /* Initialize snapshot for Mocks */ - currentSnapshot.Mocks = initialMockResourceList.Sort() - /* Initialize snapshot for Fakes */ - currentSnapshot.Fakes = initialFakeResourceList.Sort() - - snapshots := make(chan *TestingSnapshot) - go func() { - // sent initial snapshot to kick off the watch - initialSnapshot := currentSnapshot.Clone() - snapshots <- &initialSnapshot - - originalSnapshot := TestingSnapshot{} - timer := time.NewTicker(time.Second * 1) - - sync := func() { - if originalSnapshot.Hash() == currentSnapshot.Hash() { - return - } - - sentSnapshot := currentSnapshot.Clone() - select { - case snapshots <- &sentSnapshot: - stats.Record(ctx, mTestingSnapshotOut.M(1)) - originalSnapshot = currentSnapshot.Clone() - default: - stats.Record(ctx, mTestingSnapshotMissed.M(1)) - } - } - mocksByNamespace := make(map[string]MockResourceList) - fakesByNamespace := make(map[string]testing_solo_io.FakeResourceList) - - for { - record := func() { stats.Record(ctx, mTestingSnapshotIn.M(1)) } - - select { - case <-timer.C: - sync() - case <-ctx.Done(): - close(snapshots) - 
done.Wait() - close(errs) - return - case <-c.forceEmit: - sentSnapshot := currentSnapshot.Clone() - snapshots <- &sentSnapshot - case mockResourceNamespacedList := <-mockResourceChan: - record() - - namespace := mockResourceNamespacedList.namespace - - // merge lists by namespace - mocksByNamespace[namespace] = mockResourceNamespacedList.list - var mockResourceList MockResourceList - for _, mocks := range mocksByNamespace { - mockResourceList = append(mockResourceList, mocks...) - } - currentSnapshot.Mocks = mockResourceList.Sort() - case fakeResourceNamespacedList := <-fakeResourceChan: - record() - - namespace := fakeResourceNamespacedList.namespace - - // merge lists by namespace - fakesByNamespace[namespace] = fakeResourceNamespacedList.list - var fakeResourceList testing_solo_io.FakeResourceList - for _, fakes := range fakesByNamespace { - fakeResourceList = append(fakeResourceList, fakes...) - } - currentSnapshot.Fakes = fakeResourceList.Sort() - } - } - }() - return snapshots, errs, nil -} From 48a812e8122b1d9dd93911875d875fe860b304d3 Mon Sep 17 00:00:00 2001 From: changelog-bot Date: Fri, 25 Oct 2019 14:41:50 +0000 Subject: [PATCH 16/17] Adding changelog file to new location --- changelog/v0.12.0/conversions.yaml | 7 +++++++ 1 file changed, 7 insertions(+) create mode 100644 changelog/v0.12.0/conversions.yaml diff --git a/changelog/v0.12.0/conversions.yaml b/changelog/v0.12.0/conversions.yaml new file mode 100644 index 000000000..4e9788768 --- /dev/null +++ b/changelog/v0.12.0/conversions.yaml @@ -0,0 +1,7 @@ +changelog: + - type: NEW_FEATURE + description: Generate converters for multi-version resources. + issueLink: https://github.com/solo-io/solo-kit/issues/215 + - type: BREAKING_CHANGE + description: Redefine solo-kit project concept to better support multi-version resources. 
+ issueLink: https://github.com/solo-io/solo-kit/issues/215 \ No newline at end of file From a948008d0095d5ac45327957c323e83494b15944 Mon Sep 17 00:00:00 2001 From: changelog-bot Date: Fri, 25 Oct 2019 14:41:51 +0000 Subject: [PATCH 17/17] Deleting changelog file from old location --- changelog/v0.11.0/conversions.yaml | 7 ------- 1 file changed, 7 deletions(-) delete mode 100644 changelog/v0.11.0/conversions.yaml diff --git a/changelog/v0.11.0/conversions.yaml b/changelog/v0.11.0/conversions.yaml deleted file mode 100644 index 4e9788768..000000000 --- a/changelog/v0.11.0/conversions.yaml +++ /dev/null @@ -1,7 +0,0 @@ -changelog: - - type: NEW_FEATURE - description: Generate converters for multi-version resources. - issueLink: https://github.com/solo-io/solo-kit/issues/215 - - type: BREAKING_CHANGE - description: Redefine solo-kit project concept to better support multi-version resources. - issueLink: https://github.com/solo-io/solo-kit/issues/215 \ No newline at end of file